; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-UNKNOWN ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-256 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-512 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-UNKNOWN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-256 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh,+zvfbfmin -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-512 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-UNKNOWN ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-256 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-BITS-512 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-UNKNOWN ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-256 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfhmin,+zvfbfmin -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-BITS-512 ; ; VECTOR_REVERSE - masks ; define @reverse_nxv2i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv2i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: srli a0, a0, 2 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv2i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; 
RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: srli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv2i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: srli a0, a0, 2 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv2i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: srli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv2i1( %a) ret %res } define @reverse_nxv4i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv4i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: srli a0, a0, 1 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv4i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: srli a0, a0, 
1 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv4i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: srli a0, a0, 1 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv4i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: srli a0, a0, 1 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv4i1( %a) ret %res } define @reverse_nxv8i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v10, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v10, v8 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v11, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV32-BITS-512-NEXT: vmsne.vi v0, 
v10, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v10, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v10, v8 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v11, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 ; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv8i1( %a) ret %res } define @reverse_nxv16i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v10, 0 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v8 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v8 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv16i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrgather.vv v11, v8, v12 ; RV32-BITS-256-NEXT: vrgather.vv v10, v9, v12 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv16i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v 
v10 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrgather.vv v11, v8, v12 ; RV32-BITS-512-NEXT: vrgather.vv v10, v9, v12 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v10, 0 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v8 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v8 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv16i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrgather.vv v11, v8, v12 ; RV64-BITS-256-NEXT: vrgather.vv v10, v9, v12 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv16i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrgather.vv v11, v8, v12 ; RV64-BITS-512-NEXT: vrgather.vv v10, v9, v12 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv16i1( %a) ret %res } define @reverse_nxv32i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, 
e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16 ; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16 ; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16 ; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16 ; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16 ; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16 ; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; 
RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16 ; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16 ; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16 ; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16 ; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16 ; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16 ; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv32i1( %a) ret %res } define @reverse_nxv64i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i1: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v16 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v16, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v16, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v17, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v18, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v19, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v20, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v21, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v22, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v23, v24 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv64i1: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v16 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-256-NEXT: vmerge.vim v16, v8, 1, v0 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vrgather.vv v15, v16, v24 ; RV32-BITS-256-NEXT: vrgather.vv v14, v17, 
v24 ; RV32-BITS-256-NEXT: vrgather.vv v13, v18, v24 ; RV32-BITS-256-NEXT: vrgather.vv v12, v19, v24 ; RV32-BITS-256-NEXT: vrgather.vv v11, v20, v24 ; RV32-BITS-256-NEXT: vrgather.vv v10, v21, v24 ; RV32-BITS-256-NEXT: vrgather.vv v9, v22, v24 ; RV32-BITS-256-NEXT: vrgather.vv v8, v23, v24 ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv64i1: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v16 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vmerge.vim v16, v8, 1, v0 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vrgather.vv v15, v16, v24 ; RV32-BITS-512-NEXT: vrgather.vv v14, v17, v24 ; RV32-BITS-512-NEXT: vrgather.vv v13, v18, v24 ; RV32-BITS-512-NEXT: vrgather.vv v12, v19, v24 ; RV32-BITS-512-NEXT: vrgather.vv v11, v20, v24 ; RV32-BITS-512-NEXT: vrgather.vv v10, v21, v24 ; RV32-BITS-512-NEXT: vrgather.vv v9, v22, v24 ; RV32-BITS-512-NEXT: vrgather.vv v8, v23, v24 ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i1: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v16, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v16, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v17, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v18, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v19, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v20, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v21, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v22, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v23, v24 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv64i1: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-256-NEXT: vmerge.vim v16, v8, 1, v0 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vrgather.vv v15, v16, v24 ; RV64-BITS-256-NEXT: vrgather.vv v14, v17, v24 ; RV64-BITS-256-NEXT: vrgather.vv v13, v18, v24 ; RV64-BITS-256-NEXT: vrgather.vv v12, v19, v24 ; RV64-BITS-256-NEXT: vrgather.vv v11, v20, v24 ; RV64-BITS-256-NEXT: 
vrgather.vv v10, v21, v24 ; RV64-BITS-256-NEXT: vrgather.vv v9, v22, v24 ; RV64-BITS-256-NEXT: vrgather.vv v8, v23, v24 ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv64i1: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v16 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vmerge.vim v16, v8, 1, v0 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vrgather.vv v15, v16, v24 ; RV64-BITS-512-NEXT: vrgather.vv v14, v17, v24 ; RV64-BITS-512-NEXT: vrgather.vv v13, v18, v24 ; RV64-BITS-512-NEXT: vrgather.vv v12, v19, v24 ; RV64-BITS-512-NEXT: vrgather.vv v11, v20, v24 ; RV64-BITS-512-NEXT: vrgather.vv v10, v21, v24 ; RV64-BITS-512-NEXT: vrgather.vv v9, v22, v24 ; RV64-BITS-512-NEXT: vrgather.vv v8, v23, v24 ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv64i1( %a) ret %res } ; ; VECTOR_REVERSE - integer ; define @reverse_nxv1i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv1i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 3 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv1i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 3 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv1i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 3 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv1i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 3 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv1i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 3 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, 
e8, mf8, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv1i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 3 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv1i8( %a) ret %res } define @reverse_nxv2i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv2i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 2 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv2i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv2i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 2 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv2i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv2i8( %a) ret %res } define @reverse_nxv4i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: 
reverse_nxv4i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv4i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 1 ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv1r.v v8, v9 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv4i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 1 ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv1r.v v8, v9 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv4i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 1 ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv1r.v v8, v9 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv4i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 1 ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv1r.v v8, v9 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv4i8( %a) ret %res } define @reverse_nxv8i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; 
RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-256-NEXT: vmv.v.v v8, v9 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV32-BITS-512-NEXT: vmv.v.v v8, v9 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-256-NEXT: vmv.v.v v8, v9 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 ; RV64-BITS-512-NEXT: vmv.v.v v8, v9 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv8i8( %a) ret %res } define @reverse_nxv16i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v9, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv16i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-256-NEXT: vrgather.vv v11, v8, v12 ; RV32-BITS-256-NEXT: vrgather.vv v10, v9, v12 ; RV32-BITS-256-NEXT: vmv2r.v v8, v10 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv16i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v10 ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-512-NEXT: vrgather.vv v11, v8, v12 ; RV32-BITS-512-NEXT: vrgather.vv v10, v9, v12 ; RV32-BITS-512-NEXT: vmv2r.v v8, v10 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v10, a0 ; 
RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v8, v12 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v9, v12 ; RV64-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv16i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-256-NEXT: vrgather.vv v11, v8, v12 ; RV64-BITS-256-NEXT: vrgather.vv v10, v9, v12 ; RV64-BITS-256-NEXT: vmv2r.v v8, v10 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv16i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-512-NEXT: vrgather.vv v11, v8, v12 ; RV64-BITS-512-NEXT: vrgather.vv v10, v9, v12 ; RV64-BITS-512-NEXT: vmv2r.v v8, v10 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv16i8( %a) ret %res } define @reverse_nxv32i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16 ; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16 ; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16 ; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-256-NEXT: vmv4r.v v8, v12 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16 ; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16 ; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16 ; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV32-BITS-512-NEXT: vmv4r.v v8, v12 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8: ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16 ; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 ; 
RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16 ; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16 ; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16 ; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-256-NEXT: vmv4r.v v8, v12 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16 ; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16 ; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16 ; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16 ; RV64-BITS-512-NEXT: vmv4r.v v8, v12 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv32i8( %a) ret %res } define @reverse_nxv64i8( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv64i8: ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v16, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v17, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v18, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v19, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v20, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v21, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v22, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v23, v24 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv64i8: ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vmv8r.v v16, v8 ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 ; RV32-BITS-256-NEXT: vid.v v8 ; RV32-BITS-256-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-256-NEXT: vrgather.vv v15, v16, v24 ; RV32-BITS-256-NEXT: vrgather.vv v14, v17, v24 ; RV32-BITS-256-NEXT: vrgather.vv v13, v18, v24 ; RV32-BITS-256-NEXT: vrgather.vv v12, v19, v24 ; RV32-BITS-256-NEXT: vrgather.vv v11, v20, v24 ; RV32-BITS-256-NEXT: vrgather.vv v10, v21, v24 ; RV32-BITS-256-NEXT: vrgather.vv v9, v22, v24 ; RV32-BITS-256-NEXT: vrgather.vv v8, v23, v24 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv64i8: ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vmv8r.v v16, v8 ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 ; RV32-BITS-512-NEXT: vid.v v8 ; RV32-BITS-512-NEXT: vrsub.vx v24, v8, a0 ; RV32-BITS-512-NEXT: vrgather.vv v15, v16, v24 ; RV32-BITS-512-NEXT: vrgather.vv v14, v17, v24 ; RV32-BITS-512-NEXT: vrgather.vv v13, v18, v24 ; RV32-BITS-512-NEXT: vrgather.vv v12, v19, v24 ; RV32-BITS-512-NEXT: vrgather.vv v11, v20, v24 ; RV32-BITS-512-NEXT: vrgather.vv v10, v21, v24 ; RV32-BITS-512-NEXT: vrgather.vv v9, v22, v24 ; RV32-BITS-512-NEXT: vrgather.vv v8, v23, v24 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv64i8: ; RV64-BITS-UNKNOWN: # 
%bb.0: ; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v16, v8 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v16, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v17, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v18, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v19, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v20, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v21, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v22, v24 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v23, v24 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv64i8: ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vmv8r.v v16, v8 ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 ; RV64-BITS-256-NEXT: vid.v v8 ; RV64-BITS-256-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-256-NEXT: vrgather.vv v15, v16, v24 ; RV64-BITS-256-NEXT: vrgather.vv v14, v17, v24 ; RV64-BITS-256-NEXT: vrgather.vv v13, v18, v24 ; RV64-BITS-256-NEXT: vrgather.vv v12, v19, v24 ; RV64-BITS-256-NEXT: vrgather.vv v11, v20, v24 ; RV64-BITS-256-NEXT: vrgather.vv v10, v21, v24 ; RV64-BITS-256-NEXT: vrgather.vv v9, v22, v24 ; RV64-BITS-256-NEXT: vrgather.vv v8, v23, v24 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv64i8: ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vmv8r.v v16, v8 ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 ; RV64-BITS-512-NEXT: vid.v v8 ; RV64-BITS-512-NEXT: vrsub.vx v24, v8, a0 ; RV64-BITS-512-NEXT: vrgather.vv v15, v16, v24 ; RV64-BITS-512-NEXT: vrgather.vv v14, v17, v24 ; RV64-BITS-512-NEXT: vrgather.vv v13, v18, v24 ; RV64-BITS-512-NEXT: vrgather.vv v12, v19, v24 ; RV64-BITS-512-NEXT: vrgather.vv v11, v20, v24 ; RV64-BITS-512-NEXT: vrgather.vv v10, v21, v24 ; RV64-BITS-512-NEXT: vrgather.vv v9, v22, v24 ; RV64-BITS-512-NEXT: vrgather.vv v8, v23, v24 ; RV64-BITS-512-NEXT: ret %res = call @llvm.vector.reverse.nxv64i8( %a) ret %res } define @reverse_nxv1i16( %a) { ; CHECK-LABEL: reverse_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv1i16( %a) ret %res } define @reverse_nxv2i16( %a) { ; CHECK-LABEL: reverse_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv2i16( %a) ret %res } define @reverse_nxv4i16( %a) { ; CHECK-LABEL: reverse_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv4i16( %a) ret %res 
} define @reverse_nxv8i16( %a) { ; CHECK-LABEL: reverse_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v11, v8, v12 ; CHECK-NEXT: vrgather.vv v10, v9, v12 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv8i16( %a) ret %res } define @reverse_nxv16i16( %a) { ; CHECK-LABEL: reverse_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v15, v8, v16 ; CHECK-NEXT: vrgather.vv v14, v9, v16 ; CHECK-NEXT: vrgather.vv v13, v10, v16 ; CHECK-NEXT: vrgather.vv v12, v11, v16 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv16i16( %a) ret %res } define @reverse_nxv32i16( %a) { ; CHECK-LABEL: reverse_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 ; CHECK-NEXT: vrgather.vv v14, v17, v24 ; CHECK-NEXT: vrgather.vv v13, v18, v24 ; CHECK-NEXT: vrgather.vv v12, v19, v24 ; CHECK-NEXT: vrgather.vv v11, v20, v24 ; CHECK-NEXT: vrgather.vv v10, v21, v24 ; CHECK-NEXT: vrgather.vv v9, v22, v24 ; CHECK-NEXT: vrgather.vv v8, v23, v24 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv32i16( %a) ret %res } define @reverse_nxv1i32( %a) { ; CHECK-LABEL: reverse_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv1i32( %a) ret %res } define @reverse_nxv2i32( %a) { ; CHECK-LABEL: reverse_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv2i32( %a) ret %res } define @reverse_nxv4i32( %a) { ; CHECK-LABEL: reverse_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v11, v8, v12 ; CHECK-NEXT: vrgather.vv v10, v9, v12 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv4i32( %a) ret %res } define @reverse_nxv8i32( %a) { ; CHECK-LABEL: reverse_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v15, v8, v16 ; CHECK-NEXT: vrgather.vv v14, v9, v16 ; CHECK-NEXT: vrgather.vv v13, v10, v16 ; CHECK-NEXT: vrgather.vv v12, v11, v16 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv8i32( %a) ret %res } define 
@reverse_nxv16i32( %a) { ; CHECK-LABEL: reverse_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 ; CHECK-NEXT: vrgather.vv v14, v17, v24 ; CHECK-NEXT: vrgather.vv v13, v18, v24 ; CHECK-NEXT: vrgather.vv v12, v19, v24 ; CHECK-NEXT: vrgather.vv v11, v20, v24 ; CHECK-NEXT: vrgather.vv v10, v21, v24 ; CHECK-NEXT: vrgather.vv v9, v22, v24 ; CHECK-NEXT: vrgather.vv v8, v23, v24 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv16i32( %a) ret %res } define @reverse_nxv1i64( %a) { ; CHECK-LABEL: reverse_nxv1i64: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv1i64( %a) ret %res } define @reverse_nxv2i64( %a) { ; CHECK-LABEL: reverse_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v11, v8, v12 ; CHECK-NEXT: vrgather.vv v10, v9, v12 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv2i64( %a) ret %res } define @reverse_nxv4i64( %a) { ; CHECK-LABEL: reverse_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v15, v8, v16 ; CHECK-NEXT: vrgather.vv v14, v9, v16 ; CHECK-NEXT: vrgather.vv v13, v10, v16 ; CHECK-NEXT: vrgather.vv v12, v11, v16 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv4i64( %a) ret %res } define @reverse_nxv8i64( %a) { ; CHECK-LABEL: reverse_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv8r.v v16, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vx v24, v8, a0 ; CHECK-NEXT: vrgather.vv v15, v16, v24 ; CHECK-NEXT: vrgather.vv v14, v17, v24 ; CHECK-NEXT: vrgather.vv v13, v18, v24 ; CHECK-NEXT: vrgather.vv v12, v19, v24 ; CHECK-NEXT: vrgather.vv v11, v20, v24 ; CHECK-NEXT: vrgather.vv v10, v21, v24 ; CHECK-NEXT: vrgather.vv v9, v22, v24 ; CHECK-NEXT: vrgather.vv v8, v23, v24 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv8i64( %a) ret %res } ; ; VECTOR_REVERSE - floating point ; define @reverse_nxv1bf16( %a) { ; CHECK-LABEL: reverse_nxv1bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.vector.reverse.nxv1bf16( %a) ret %res } define @reverse_nxv2bf16( %a) { ; CHECK-LABEL: reverse_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; 
define <vscale x 2 x bfloat> @reverse_nxv2bf16(<vscale x 2 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv2bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x bfloat> @llvm.vector.reverse.nxv2bf16(<vscale x 2 x bfloat> %a)
  ret <vscale x 2 x bfloat> %res
}

define <vscale x 4 x bfloat> @reverse_nxv4bf16(<vscale x 4 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x bfloat> @llvm.vector.reverse.nxv4bf16(<vscale x 4 x bfloat> %a)
  ret <vscale x 4 x bfloat> %res
}

define <vscale x 8 x bfloat> @reverse_nxv8bf16(<vscale x 8 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v11, v8, v12
; CHECK-NEXT:    vrgather.vv v10, v9, v12
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> %a)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 16 x bfloat> @reverse_nxv16bf16(<vscale x 16 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv16bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v15, v8, v16
; CHECK-NEXT:    vrgather.vv v14, v9, v16
; CHECK-NEXT:    vrgather.vv v13, v10, v16
; CHECK-NEXT:    vrgather.vv v12, v11, v16
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x bfloat> @llvm.vector.reverse.nxv16bf16(<vscale x 16 x bfloat> %a)
  ret <vscale x 16 x bfloat> %res
}

define <vscale x 32 x bfloat> @reverse_nxv32bf16(<vscale x 32 x bfloat> %a) {
; CHECK-LABEL: reverse_nxv32bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v24, v8, a0
; CHECK-NEXT:    vrgather.vv v15, v16, v24
; CHECK-NEXT:    vrgather.vv v14, v17, v24
; CHECK-NEXT:    vrgather.vv v13, v18, v24
; CHECK-NEXT:    vrgather.vv v12, v19, v24
; CHECK-NEXT:    vrgather.vv v11, v20, v24
; CHECK-NEXT:    vrgather.vv v10, v21, v24
; CHECK-NEXT:    vrgather.vv v9, v22, v24
; CHECK-NEXT:    vrgather.vv v8, v23, v24
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x bfloat> @llvm.vector.reverse.nxv32bf16(<vscale x 32 x bfloat> %a)
  ret <vscale x 32 x bfloat> %res
}

define <vscale x 1 x half> @reverse_nxv1f16(<vscale x 1 x half> %a) {
; CHECK-LABEL: reverse_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x half> @llvm.vector.reverse.nxv1f16(<vscale x 1 x half> %a)
  ret <vscale x 1 x half> %res
}

define <vscale x 2 x half> @reverse_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: reverse_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.vector.reverse.nxv2f16(<vscale x 2 x half> %a)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x half> @reverse_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: reverse_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.vector.reverse.nxv4f16(<vscale x 4 x half> %a)
  ret <vscale x 4 x half> %res
}
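; For the m8 cases (nxv32bf16 above, nxv32f16 below) the source group
; already occupies v8-v15, so the whole-group copy happens up front
; (vmv8r.v v16, v8) before v8 is reused for the indices, and the gathers
; then write the reversed registers straight back into v8-v15 instead of
; ending with a vmv2r/vmv4r move like the smaller-LMUL cases.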
define <vscale x 8 x half> @reverse_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: reverse_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v11, v8, v12
; CHECK-NEXT:    vrgather.vv v10, v9, v12
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.vector.reverse.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %res
}

define <vscale x 16 x half> @reverse_nxv16f16(<vscale x 16 x half> %a) {
; CHECK-LABEL: reverse_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v15, v8, v16
; CHECK-NEXT:    vrgather.vv v14, v9, v16
; CHECK-NEXT:    vrgather.vv v13, v10, v16
; CHECK-NEXT:    vrgather.vv v12, v11, v16
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.vector.reverse.nxv16f16(<vscale x 16 x half> %a)
  ret <vscale x 16 x half> %res
}

define <vscale x 32 x half> @reverse_nxv32f16(<vscale x 32 x half> %a) {
; CHECK-LABEL: reverse_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v24, v8, a0
; CHECK-NEXT:    vrgather.vv v15, v16, v24
; CHECK-NEXT:    vrgather.vv v14, v17, v24
; CHECK-NEXT:    vrgather.vv v13, v18, v24
; CHECK-NEXT:    vrgather.vv v12, v19, v24
; CHECK-NEXT:    vrgather.vv v11, v20, v24
; CHECK-NEXT:    vrgather.vv v10, v21, v24
; CHECK-NEXT:    vrgather.vv v9, v22, v24
; CHECK-NEXT:    vrgather.vv v8, v23, v24
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x half> @llvm.vector.reverse.nxv32f16(<vscale x 32 x half> %a)
  ret <vscale x 32 x half> %res
}

define <vscale x 1 x float> @reverse_nxv1f32(<vscale x 1 x float> %a) {
; CHECK-LABEL: reverse_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x float> @llvm.vector.reverse.nxv1f32(<vscale x 1 x float> %a)
  ret <vscale x 1 x float> %res
}

define <vscale x 2 x float> @reverse_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: reverse_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.vector.reverse.nxv2f32(<vscale x 2 x float> %a)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x float> @reverse_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: reverse_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v11, v8, v12
; CHECK-NEXT:    vrgather.vv v10, v9, v12
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %res
}

define <vscale x 8 x float> @reverse_nxv8f32(<vscale x 8 x float> %a) {
; CHECK-LABEL: reverse_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v15, v8, v16
; CHECK-NEXT:    vrgather.vv v14, v9, v16
; CHECK-NEXT:    vrgather.vv v13, v10, v16
; CHECK-NEXT:    vrgather.vv v12, v11, v16
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.vector.reverse.nxv8f32(<vscale x 8 x float> %a)
  ret <vscale x 8 x float> %res
}
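; Note the result-copy idiom: fractional-LMUL results (mf4/mf2) come back
; via the whole-register vmv1r.v, while exact-m1 results use the VL-governed
; vmv.v.v. Both leave the reversed data in v8; the difference presumably
; falls out of the copy selected for each register class rather than being
; a property these tests try to pin down.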
define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) {
; CHECK-LABEL: reverse_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v24, v8, a0
; CHECK-NEXT:    vrgather.vv v15, v16, v24
; CHECK-NEXT:    vrgather.vv v14, v17, v24
; CHECK-NEXT:    vrgather.vv v13, v18, v24
; CHECK-NEXT:    vrgather.vv v12, v19, v24
; CHECK-NEXT:    vrgather.vv v11, v20, v24
; CHECK-NEXT:    vrgather.vv v10, v21, v24
; CHECK-NEXT:    vrgather.vv v9, v22, v24
; CHECK-NEXT:    vrgather.vv v8, v23, v24
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x float> @llvm.vector.reverse.nxv16f32(<vscale x 16 x float> %a)
  ret <vscale x 16 x float> %res
}

define <vscale x 1 x double> @reverse_nxv1f64(<vscale x 1 x double> %a) {
; CHECK-LABEL: reverse_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a0
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x double> @llvm.vector.reverse.nxv1f64(<vscale x 1 x double> %a)
  ret <vscale x 1 x double> %res
}

define <vscale x 2 x double> @reverse_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: reverse_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a0
; CHECK-NEXT:    vrgather.vv v11, v8, v12
; CHECK-NEXT:    vrgather.vv v10, v9, v12
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.vector.reverse.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %res
}

define <vscale x 4 x double> @reverse_nxv4f64(<vscale x 4 x double> %a) {
; CHECK-LABEL: reverse_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a0
; CHECK-NEXT:    vrgather.vv v15, v8, v16
; CHECK-NEXT:    vrgather.vv v14, v9, v16
; CHECK-NEXT:    vrgather.vv v13, v10, v16
; CHECK-NEXT:    vrgather.vv v12, v11, v16
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.vector.reverse.nxv4f64(<vscale x 4 x double> %a)
  ret <vscale x 4 x double> %res
}

define <vscale x 8 x double> @reverse_nxv8f64(<vscale x 8 x double> %a) {
; CHECK-LABEL: reverse_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vx v24, v8, a0
; CHECK-NEXT:    vrgather.vv v15, v16, v24
; CHECK-NEXT:    vrgather.vv v14, v17, v24
; CHECK-NEXT:    vrgather.vv v13, v18, v24
; CHECK-NEXT:    vrgather.vv v12, v19, v24
; CHECK-NEXT:    vrgather.vv v11, v20, v24
; CHECK-NEXT:    vrgather.vv v10, v21, v24
; CHECK-NEXT:    vrgather.vv v9, v22, v24
; CHECK-NEXT:    vrgather.vv v8, v23, v24
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x double> @llvm.vector.reverse.nxv8f64(<vscale x 8 x double> %a)
  ret <vscale x 8 x double> %res
}

; Test widen reverse vector

define <vscale x 3 x i64> @reverse_nxv3i64(<vscale x 3 x i64> %a) {
; CHECK-LABEL: reverse_nxv3i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v14, v12, a0
; CHECK-NEXT:    vrgather.vv v13, v10, v14
; CHECK-NEXT:    vrgather.vv v10, v9, v14
; CHECK-NEXT:    vmv.v.v v12, v13
; CHECK-NEXT:    vrgather.vv v15, v8, v14
; CHECK-NEXT:    vmv.v.v v13, v10
; CHECK-NEXT:    vrgather.vv v8, v11, v14
; CHECK-NEXT:    vmv.v.v v14, v15
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 3 x i64> @llvm.vector.reverse.nxv3i64(<vscale x 3 x i64> %a)
  ret <vscale x 3 x i64> %res
}
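; Widening in practice: nxv3i64 has no machine type of its own, so it is
; handled as the containing nxv4i64 (v8-v11, fourth register undef).
; Reversing the widened value would leave the undef register at the front,
; so after each m1 register is reversed the live data is realigned one
; register down (v12 <- reversed v10, v13 <- reversed v9, v14 <- reversed
; v8) and the gather of v11 is dead. nxv6i64 and nxv12i64 below appear to
; widen the same way, to nxv8i64 and nxv16i64 respectively.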
define <vscale x 6 x i64> @reverse_nxv6i64(<vscale x 6 x i64> %a) {
; CHECK-LABEL: reverse_nxv6i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v24, v16, a0
; CHECK-NEXT:    vrgather.vv v21, v10, v24
; CHECK-NEXT:    vrgather.vv v19, v12, v24
; CHECK-NEXT:    vrgather.vv v18, v13, v24
; CHECK-NEXT:    vrgather.vv v20, v11, v24
; CHECK-NEXT:    vmv2r.v v16, v18
; CHECK-NEXT:    vmv2r.v v18, v20
; CHECK-NEXT:    vrgather.vv v23, v8, v24
; CHECK-NEXT:    vrgather.vv v22, v9, v24
; CHECK-NEXT:    vrgather.vv v9, v14, v24
; CHECK-NEXT:    vrgather.vv v8, v15, v24
; CHECK-NEXT:    vmv2r.v v20, v22
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x i64> @llvm.vector.reverse.nxv6i64(<vscale x 6 x i64> %a)
  ret <vscale x 6 x i64> %res
}
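; reverse_nxv12i64 widens to nxv16i64, i.e. two full m8 register groups, so
; the reversed registers can no longer be shuffled into place within the
; register file alone: the code below gathers into v20-v31, stores them to
; a 64-byte-aligned stack temporary as one 8-register and one 4-register
; span (hence the frame setup and "andi sp, sp, -64"), and reloads the
; result as the v8 and v16 groups.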
define <vscale x 12 x i64> @reverse_nxv12i64(<vscale x 12 x i64> %a) {
; RV32-LABEL: reverse_nxv12i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -80
; RV32-NEXT:    .cfi_def_cfa_offset 80
; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 80
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v20
; RV32-NEXT:    srli a1, a0, 3
; RV32-NEXT:    addi a1, a1, -1
; RV32-NEXT:    vrsub.vx v7, v20, a1
; RV32-NEXT:    vrgather.vv v31, v12, v7
; RV32-NEXT:    vrgather.vv v23, v8, v7
; RV32-NEXT:    vrgather.vv v30, v13, v7
; RV32-NEXT:    vrgather.vv v22, v9, v7
; RV32-NEXT:    vrgather.vv v29, v14, v7
; RV32-NEXT:    vrgather.vv v21, v10, v7
; RV32-NEXT:    vrgather.vv v28, v15, v7
; RV32-NEXT:    vrgather.vv v20, v11, v7
; RV32-NEXT:    addi a1, sp, 64
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    vrgather.vv v27, v16, v7
; RV32-NEXT:    vs4r.v v20, (a0)
; RV32-NEXT:    vrgather.vv v26, v17, v7
; RV32-NEXT:    vrgather.vv v25, v18, v7
; RV32-NEXT:    vrgather.vv v24, v19, v7
; RV32-NEXT:    vs8r.v v24, (a1)
; RV32-NEXT:    vl8re64.v v16, (a0)
; RV32-NEXT:    vl8re64.v v8, (a1)
; RV32-NEXT:    addi sp, s0, -80
; RV32-NEXT:    .cfi_def_cfa sp, 80
; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    .cfi_restore s0
; RV32-NEXT:    addi sp, sp, 80
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: reverse_nxv12i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    .cfi_def_cfa_offset 80
; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 80
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vid.v v20
; RV64-NEXT:    srli a1, a0, 3
; RV64-NEXT:    addi a1, a1, -1
; RV64-NEXT:    vrsub.vx v7, v20, a1
; RV64-NEXT:    vrgather.vv v31, v12, v7
; RV64-NEXT:    vrgather.vv v23, v8, v7
; RV64-NEXT:    vrgather.vv v30, v13, v7
; RV64-NEXT:    vrgather.vv v22, v9, v7
; RV64-NEXT:    vrgather.vv v29, v14, v7
; RV64-NEXT:    vrgather.vv v21, v10, v7
; RV64-NEXT:    vrgather.vv v28, v15, v7
; RV64-NEXT:    vrgather.vv v20, v11, v7
; RV64-NEXT:    addi a1, sp, 64
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    vrgather.vv v27, v16, v7
; RV64-NEXT:    vs4r.v v20, (a0)
; RV64-NEXT:    vrgather.vv v26, v17, v7
; RV64-NEXT:    vrgather.vv v25, v18, v7
; RV64-NEXT:    vrgather.vv v24, v19, v7
; RV64-NEXT:    vs8r.v v24, (a1)
; RV64-NEXT:    vl8re64.v v16, (a0)
; RV64-NEXT:    vl8re64.v v8, (a1)
; RV64-NEXT:    addi sp, s0, -80
; RV64-NEXT:    .cfi_def_cfa sp, 80
; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    .cfi_restore s0
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %res = call <vscale x 12 x i64> @llvm.vector.reverse.nxv12i64(<vscale x 12 x i64> %a)
  ret <vscale x 12 x i64> %res
}

declare <vscale x 2 x i1> @llvm.vector.reverse.nxv2i1(<vscale x 2 x i1>)
declare <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 8 x i1> @llvm.vector.reverse.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.vector.reverse.nxv16i1(<vscale x 16 x i1>)
declare <vscale x 32 x i1> @llvm.vector.reverse.nxv32i1(<vscale x 32 x i1>)
declare <vscale x 64 x i1> @llvm.vector.reverse.nxv64i1(<vscale x 64 x i1>)
declare <vscale x 1 x i8> @llvm.vector.reverse.nxv1i8(<vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.vector.reverse.nxv2i8(<vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.vector.reverse.nxv4i8(<vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.vector.reverse.nxv8i8(<vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.vector.reverse.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 32 x i8> @llvm.vector.reverse.nxv32i8(<vscale x 32 x i8>)
declare <vscale x 64 x i8> @llvm.vector.reverse.nxv64i8(<vscale x 64 x i8>)
declare <vscale x 1 x i16> @llvm.vector.reverse.nxv1i16(<vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.vector.reverse.nxv2i16(<vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.vector.reverse.nxv4i16(<vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.vector.reverse.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.vector.reverse.nxv16i16(<vscale x 16 x i16>)
declare <vscale x 32 x i16> @llvm.vector.reverse.nxv32i16(<vscale x 32 x i16>)
declare <vscale x 1 x i32> @llvm.vector.reverse.nxv1i32(<vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.vector.reverse.nxv2i32(<vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.vector.reverse.nxv8i32(<vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.vector.reverse.nxv16i32(<vscale x 16 x i32>)
declare <vscale x 1 x i64> @llvm.vector.reverse.nxv1i64(<vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.vector.reverse.nxv2i64(<vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.vector.reverse.nxv4i64(<vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.vector.reverse.nxv8i64(<vscale x 8 x i64>)
declare <vscale x 1 x half> @llvm.vector.reverse.nxv1f16(<vscale x 1 x half>)
declare <vscale x 2 x half> @llvm.vector.reverse.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.vector.reverse.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.vector.reverse.nxv8f16(<vscale x 8 x half>)
declare <vscale x 16 x half> @llvm.vector.reverse.nxv16f16(<vscale x 16 x half>)
declare <vscale x 32 x half> @llvm.vector.reverse.nxv32f16(<vscale x 32 x half>)
declare <vscale x 1 x float> @llvm.vector.reverse.nxv1f32(<vscale x 1 x float>)
declare <vscale x 2 x float> @llvm.vector.reverse.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float>)
declare <vscale x 8 x float> @llvm.vector.reverse.nxv8f32(<vscale x 8 x float>)
declare <vscale x 16 x float> @llvm.vector.reverse.nxv16f32(<vscale x 16 x float>)
declare <vscale x 1 x double> @llvm.vector.reverse.nxv1f64(<vscale x 1 x double>)
declare <vscale x 2 x double> @llvm.vector.reverse.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x double> @llvm.vector.reverse.nxv4f64(<vscale x 4 x double>)
declare <vscale x 8 x double> @llvm.vector.reverse.nxv8f64(<vscale x 8 x double>)
declare <vscale x 3 x i64> @llvm.vector.reverse.nxv3i64(<vscale x 3 x i64>)
declare <vscale x 6 x i64> @llvm.vector.reverse.nxv6i64(<vscale x 6 x i64>)
declare <vscale x 12 x i64> @llvm.vector.reverse.nxv12i64(<vscale x 12 x i64>)