; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin | FileCheck %s --check-prefixes=CHECK,V,RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zvbb,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=ZVBB,ZVBB-RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvbb,+zvfh,+zvfbfmin | FileCheck %s --check-prefixes=ZVBB,ZVBB-RV64
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zvfhmin,+zvfbfmin,+experimental-xrivosvizip | FileCheck %s --check-prefixes=CHECK,ZIP

; Integers

define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; V-LABEL: vector_interleave_nxv32i1_nxv16i1:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; V-NEXT:    vmv1r.v v9, v0
; V-NEXT:    vmv1r.v v0, v8
; V-NEXT:    vmv.v.i v10, 0
; V-NEXT:    li a0, -1
; V-NEXT:    vmerge.vim v12, v10, 1, v0
; V-NEXT:    vmv1r.v v0, v9
; V-NEXT:    vmerge.vim v14, v10, 1, v0
; V-NEXT:    vwaddu.vv v8, v14, v12
; V-NEXT:    vwmaccu.vx v8, a0, v12
; V-NEXT:    csrr a0, vlenb
; V-NEXT:    vmsne.vi v12, v10, 0
; V-NEXT:    vmsne.vi v0, v8, 0
; V-NEXT:    srli a0, a0, 2
; V-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; V-NEXT:    vslideup.vx v0, v12, a0
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; ZVBB-NEXT:    vmv1r.v v9, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmv.v.i v10, 0
; ZVBB-NEXT:    li a0, 1
; ZVBB-NEXT:    vmerge.vim v10, v10, 1, v0
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    vmv1r.v v0, v9
; ZVBB-NEXT:    vwaddu.wx v12, v12, a0, v0.t
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    vmsne.vi v8, v14, 0
; ZVBB-NEXT:    vmsne.vi v0, v12, 0
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a0
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZIP-NEXT:    vmv1r.v v9, v0
; ZIP-NEXT:    vmv1r.v v0, v8
; ZIP-NEXT:    vmv.v.i v10, 0
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    vmerge.vim v12, v10, 1, v0
; ZIP-NEXT:    vmv1r.v v0, v9
; ZIP-NEXT:    vmerge.vim v8, v10, 1, v0
; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
; ZIP-NEXT:    ri.vzip2a.vv v14, v8, v12
; ZIP-NEXT:    vmsne.vi v8, v10, 0
; ZIP-NEXT:    vmsne.vi v0, v14, 0
; ZIP-NEXT:    srli a0, a0, 2
; ZIP-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; ZIP-NEXT:    vslideup.vx v0, v8, a0
; ZIP-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  ret <vscale x 32 x i1> %res
}
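
; With plain V, interleave2 is computed arithmetically: vwaddu.vv produces
; the zero-extended sum a + b at twice the SEW, and vwmaccu.vx with a -1
; multiplier (zero-extended to 2^SEW - 1) adds b * (2^SEW - 1), so the wide
; result is a + (b << SEW), i.e. a in the even lanes and b in the odd lanes.
; With ZVBB, vwsll shifts b into the high half directly, and with
; XRivosVizip the ri.vzip2a/ri.vzip2b instructions produce the two result
; halves outright. Mask operands are first expanded to 0/1 bytes with
; vmerge, then compressed back with vmsne, with a vslideup splicing in the
; high half of the result mask.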

define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; V-LABEL: vector_interleave_nxv32i8_nxv16i8:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; V-NEXT:    vmv2r.v v12, v10
; V-NEXT:    vmv2r.v v14, v8
; V-NEXT:    vwaddu.vv v8, v14, v12
; V-NEXT:    li a0, -1
; V-NEXT:    vwmaccu.vx v8, a0, v12
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv2r.v v12, v10
; ZVBB-NEXT:    vmv2r.v v14, v8
; ZVBB-NEXT:    vwsll.vi v8, v12, 8
; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZIP-NEXT:    vmv2r.v v12, v10
; ZIP-NEXT:    vmv2r.v v14, v8
; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
; ZIP-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 32 x i8> %res
}

define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; V-LABEL: vector_interleave_nxv16i16_nxv8i16:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; V-NEXT:    vmv2r.v v12, v10
; V-NEXT:    vmv2r.v v14, v8
; V-NEXT:    vwaddu.vv v8, v14, v12
; V-NEXT:    li a0, -1
; V-NEXT:    vwmaccu.vx v8, a0, v12
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vmv2r.v v12, v10
; ZVBB-NEXT:    vmv2r.v v14, v8
; ZVBB-NEXT:    vwsll.vi v8, v12, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZIP-NEXT:    vmv2r.v v12, v10
; ZIP-NEXT:    vmv2r.v v14, v8
; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
; ZIP-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 16 x i16> %res
}

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; V-LABEL: vector_interleave_nxv8i32_nxv4i32:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; V-NEXT:    vmv2r.v v12, v10
; V-NEXT:    vmv2r.v v14, v8
; V-NEXT:    vwaddu.vv v8, v14, v12
; V-NEXT:    li a0, -1
; V-NEXT:    vwmaccu.vx v8, a0, v12
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vmv2r.v v12, v10
; ZVBB-NEXT:    vmv2r.v v14, v8
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vwsll.vx v8, v12, a0
; ZVBB-NEXT:    vwaddu.wv v8, v8, v14
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; ZIP-NEXT:    vmv2r.v v12, v10
; ZIP-NEXT:    vmv2r.v v14, v8
; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
; ZIP-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 8 x i32> %res
}

define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; V-LABEL: vector_interleave_nxv4i64_nxv2i64:
; V:       # %bb.0:
; V-NEXT:    csrr a0, vlenb
; V-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; V-NEXT:    vid.v v12
; V-NEXT:    srli a0, a0, 2
; V-NEXT:    vand.vi v13, v12, 1
; V-NEXT:    vmsne.vi v0, v13, 0
; V-NEXT:    vsrl.vi v16, v12, 1
; V-NEXT:    vadd.vx v16, v16, a0, v0.t
; V-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; V-NEXT:    vrgatherei16.vv v12, v8, v16
; V-NEXT:    vmv.v.v v8, v12
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; ZIP-NEXT:    vmv2r.v v12, v10
; ZIP-NEXT:    vmv2r.v v14, v8
; ZIP-NEXT:    ri.vzip2b.vv v10, v8, v12
; ZIP-NEXT:    ri.vzip2a.vv v8, v14, v12
; ZIP-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i64> %res
}
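
; There is no element type wider than e64 to widen into, so without vzip the
; e64 interleave above is a gather: vid/vand/vmsne build an odd-lane mask,
; the index i >> 1 (plus the per-operand element count for odd lanes)
; selects alternately from the two concatenated sources, and a single
; vrgatherei16.vv materializes the zip.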

define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; V-LABEL: vector_interleave_nxv128i1_nxv64i1:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; V-NEXT:    vmv1r.v v9, v0
; V-NEXT:    vmv1r.v v0, v8
; V-NEXT:    vmv.v.i v24, 0
; V-NEXT:    li a0, -1
; V-NEXT:    vmerge.vim v16, v24, 1, v0
; V-NEXT:    vmv1r.v v0, v9
; V-NEXT:    vmerge.vim v24, v24, 1, v0
; V-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; V-NEXT:    vwaddu.vv v8, v24, v16
; V-NEXT:    vwaddu.vv v0, v28, v20
; V-NEXT:    vwmaccu.vx v8, a0, v16
; V-NEXT:    vwmaccu.vx v0, a0, v20
; V-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; V-NEXT:    vmsne.vi v16, v8, 0
; V-NEXT:    vmsne.vi v8, v0, 0
; V-NEXT:    vmv1r.v v0, v16
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmv.v.i v24, 0
; ZVBB-NEXT:    vmerge.vim v16, v24, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v24, v24, 1, v0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v24, 8
; ZVBB-NEXT:    vwsll.vi v0, v28, 8
; ZVBB-NEXT:    vwaddu.wv v8, v8, v16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v20
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v16, v8, 0
; ZVBB-NEXT:    vmsne.vi v8, v0, 0
; ZVBB-NEXT:    vmv1r.v v0, v16
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZIP-NEXT:    vmv1r.v v9, v0
; ZIP-NEXT:    vmv1r.v v0, v8
; ZIP-NEXT:    vmv.v.i v24, 0
; ZIP-NEXT:    vmerge.vim v16, v24, 1, v0
; ZIP-NEXT:    vmv1r.v v0, v9
; ZIP-NEXT:    vmerge.vim v8, v24, 1, v0
; ZIP-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZIP-NEXT:    ri.vzip2b.vv v4, v8, v16
; ZIP-NEXT:    ri.vzip2b.vv v28, v12, v20
; ZIP-NEXT:    ri.vzip2a.vv v0, v8, v16
; ZIP-NEXT:    ri.vzip2a.vv v24, v12, v20
; ZIP-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZIP-NEXT:    vmsne.vi v9, v0, 0
; ZIP-NEXT:    vmsne.vi v8, v24, 0
; ZIP-NEXT:    vmv1r.v v0, v9
; ZIP-NEXT:    ret
  %res = call <vscale x 128 x i1> @llvm.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
  ret <vscale x 128 x i1> %res
}

define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; V-LABEL: vector_interleave_nxv128i8_nxv64i8:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; V-NEXT:    vmv8r.v v24, v8
; V-NEXT:    vwaddu.vv v8, v24, v16
; V-NEXT:    li a0, -1
; V-NEXT:    vwaddu.vv v0, v28, v20
; V-NEXT:    vwmaccu.vx v8, a0, v16
; V-NEXT:    vwmaccu.vx v0, a0, v20
; V-NEXT:    vmv8r.v v16, v0
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v16, 8
; ZVBB-NEXT:    vwsll.vi v0, v20, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
; ZIP-NEXT:    vmv8r.v v8, v24
; ZIP-NEXT:    vmv8r.v v16, v0
; ZIP-NEXT:    ret
  %res = call <vscale x 128 x i8> @llvm.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
  ret <vscale x 128 x i8> %res
}
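
; At m8 the sources no longer fit a single widening operation, so each m8
; operand is processed as two m4 halves; the low half of the zip lands in
; v8 and the high half in v16 with only whole-register moves in between.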

define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; V-LABEL: vector_interleave_nxv64i16_nxv32i16:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; V-NEXT:    vmv8r.v v24, v8
; V-NEXT:    vwaddu.vv v8, v24, v16
; V-NEXT:    li a0, -1
; V-NEXT:    vwaddu.vv v0, v28, v20
; V-NEXT:    vwmaccu.vx v8, a0, v16
; V-NEXT:    vwmaccu.vx v0, a0, v20
; V-NEXT:    vmv8r.v v16, v0
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v16, 16
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
; ZIP-NEXT:    vmv8r.v v8, v24
; ZIP-NEXT:    vmv8r.v v16, v0
; ZIP-NEXT:    ret
  %res = call <vscale x 64 x i16> @llvm.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
  ret <vscale x 64 x i16> %res
}

define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; V-LABEL: vector_interleave_nxv32i32_nxv16i32:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; V-NEXT:    vmv8r.v v24, v8
; V-NEXT:    vwaddu.vv v8, v24, v16
; V-NEXT:    li a0, -1
; V-NEXT:    vwaddu.vv v0, v28, v20
; V-NEXT:    vwmaccu.vx v8, a0, v16
; V-NEXT:    vwmaccu.vx v0, a0, v20
; V-NEXT:    vmv8r.v v16, v0
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v24, v16, a0
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
; ZIP-NEXT:    vmv8r.v v8, v24
; ZIP-NEXT:    vmv8r.v v16, v0
; ZIP-NEXT:    ret
  %res = call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
  ret <vscale x 32 x i32> %res
}

define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; V-LABEL: vector_interleave_nxv16i64_nxv8i64:
; V:       # %bb.0:
; V-NEXT:    csrr a0, vlenb
; V-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; V-NEXT:    vid.v v6
; V-NEXT:    vmv8r.v v24, v8
; V-NEXT:    srli a0, a0, 1
; V-NEXT:    vmv4r.v v28, v16
; V-NEXT:    vmv4r.v v16, v12
; V-NEXT:    vand.vi v8, v6, 1
; V-NEXT:    vmsne.vi v0, v8, 0
; V-NEXT:    vsrl.vi v6, v6, 1
; V-NEXT:    vadd.vx v6, v6, a0, v0.t
; V-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; V-NEXT:    vrgatherei16.vv v8, v24, v6
; V-NEXT:    vrgatherei16.vv v24, v16, v6
; V-NEXT:    vmv.v.v v16, v24
; V-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v6
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vand.vi v8, v6, 1
; ZVBB-NEXT:    vmsne.vi v0, v8, 0
; ZVBB-NEXT:    vsrl.vi v6, v6, 1
; ZVBB-NEXT:    vadd.vx v6, v6, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v8, v24, v6
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v6
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; ZIP-NEXT:    ri.vzip2b.vv v28, v8, v16
; ZIP-NEXT:    ri.vzip2b.vv v4, v12, v20
; ZIP-NEXT:    ri.vzip2a.vv v24, v8, v16
; ZIP-NEXT:    ri.vzip2a.vv v0, v12, v20
; ZIP-NEXT:    vmv8r.v v8, v24
; ZIP-NEXT:    vmv8r.v v16, v0
; ZIP-NEXT:    ret
  %res = call <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
  ret <vscale x 16 x i64> %res
}

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison(<vscale x 4 x i32> %a) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmv2r.v v12, v8
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vmv2r.v v12, v8
; ZVBB-NEXT:    vzext.vf2 v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> poison)
  ret <vscale x 8 x i32> %res
}
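
; With a poison odd operand the zip degenerates to a zero-extend: vzext.vf2
; places %a in the even e32 lanes and zeros (a legal choice for poison) in
; the odd lanes. The poison-even case below additionally shifts %a into the
; odd lanes, which ZVBB folds into a single vwsll.vx.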

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32_poison2(<vscale x 4 x i32> %a) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v12, v8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsll.vx v8, v12, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32_poison2:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vmv2r.v v12, v8
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vwsll.vx v8, v12, a0
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a)
  ret <vscale x 8 x i32> %res
}

define <vscale x 48 x i1> @vector_interleave_nxv48i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) nounwind {
; CHECK-LABEL: vector_interleave_nxv48i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
; CHECK-NEXT:    slli a2, a1, 1
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
; CHECK-NEXT:    add a3, a0, a2
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
; CHECK-NEXT:    add a2, a3, a2
; CHECK-NEXT:    vsseg3e8.v v14, (a0)
; CHECK-NEXT:    vl2r.v v8, (a2)
; CHECK-NEXT:    srli a2, a1, 1
; CHECK-NEXT:    vl2r.v v10, (a3)
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    srli a1, a1, 2
; CHECK-NEXT:    vmsne.vi v14, v8, 0
; CHECK-NEXT:    vmsne.vi v8, v10, 0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a1
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v14, a2
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv48i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv1r.v v10, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmv.v.i v12, 0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
; ZVBB-NEXT:    slli a2, a1, 1
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
; ZVBB-NEXT:    add a3, a0, a2
; ZVBB-NEXT:    vmv1r.v v0, v9
; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
; ZVBB-NEXT:    add a2, a3, a2
; ZVBB-NEXT:    vsseg3e8.v v14, (a0)
; ZVBB-NEXT:    vl2r.v v8, (a2)
; ZVBB-NEXT:    srli a2, a1, 1
; ZVBB-NEXT:    vl2r.v v10, (a3)
; ZVBB-NEXT:    vl2r.v v12, (a0)
; ZVBB-NEXT:    srli a1, a1, 2
; ZVBB-NEXT:    vmsne.vi v14, v8, 0
; ZVBB-NEXT:    vmsne.vi v8, v10, 0
; ZVBB-NEXT:    vmsne.vi v0, v12, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a1
; ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v14, a2
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 48 x i1> @llvm.vector.interleave3.nxv48i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c)
  ret <vscale x 48 x i1> %res
}
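
; Interleave factors without a single-instruction lowering (3, 5, ...)
; round-trip through the stack: a segmented store (vsseg3e*) writes the
; operands so that memory holds the interleaved data, which is then
; reloaded as whole registers. The frame is 6 * vlenb here: three m2
; sources.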

define <vscale x 48 x i8> @vector_interleave_nxv48i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) nounwind {
; CHECK-LABEL: vector_interleave_nxv48i8_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    vl2r.v v8, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2r.v v10, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv48i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vsseg3e8.v v8, (a0)
; ZVBB-NEXT:    vl2r.v v8, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2r.v v10, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2r.v v12, (a0)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 48 x i8> @llvm.vector.interleave3.nxv48i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  ret <vscale x 48 x i8> %res
}

define <vscale x 24 x i16> @vector_interleave_nxv24i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) nounwind {
; CHECK-LABEL: vector_interleave_nxv24i16_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsseg3e16.v v8, (a0)
; CHECK-NEXT:    vl2re16.v v8, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re16.v v10, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv24i16_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vsseg3e16.v v8, (a0)
; ZVBB-NEXT:    vl2re16.v v8, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re16.v v10, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re16.v v12, (a0)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 24 x i16> @llvm.vector.interleave3.nxv24i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  ret <vscale x 24 x i16> %res
}
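
; The vl2r/vl2re16/vl2re32/vl2re64 reloads are whole-register loads, so they
; do not depend on the vtype set for the segmented store; the element width
; in the mnemonic only encodes the EEW hint.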

define <vscale x 12 x i32> @vector_interleave_nxv12i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) nounwind {
; CHECK-LABEL: vector_interleave_nxv12i32_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsseg3e32.v v8, (a0)
; CHECK-NEXT:    vl2re32.v v8, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re32.v v10, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv12i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vsseg3e32.v v8, (a0)
; ZVBB-NEXT:    vl2re32.v v8, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re32.v v10, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re32.v v12, (a0)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  ret <vscale x 12 x i32> %res
}

define <vscale x 6 x i64> @vector_interleave_nxv6i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) nounwind {
; CHECK-LABEL: vector_interleave_nxv6i64_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsseg3e64.v v8, (a0)
; CHECK-NEXT:    vl2re64.v v8, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re64.v v10, (a0)
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2re64.v v12, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv6i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
; ZVBB-NEXT:    vsseg3e64.v v8, (a0)
; ZVBB-NEXT:    vl2re64.v v8, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re64.v v10, (a0)
; ZVBB-NEXT:    add a0, a0, a1
; ZVBB-NEXT:    vl2re64.v v12, (a0)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 6
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  ret <vscale x 6 x i64> %res
}

define <vscale x 64 x i1> @vector_interleave_nxv64i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d) nounwind {
; CHECK-LABEL: vector_interleave_nxv64i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
; CHECK-NEXT:    slli a2, a1, 1
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
; CHECK-NEXT:    add a3, a0, a2
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
; CHECK-NEXT:    add a4, a3, a2
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v20, v12, 1, v0
; CHECK-NEXT:    add a2, a4, a2
; CHECK-NEXT:    vsseg4e8.v v14, (a0)
; CHECK-NEXT:    vl2r.v v8, (a2)
; CHECK-NEXT:    srli a2, a1, 1
; CHECK-NEXT:    srli a1, a1, 2
; CHECK-NEXT:    vl2r.v v10, (a4)
; CHECK-NEXT:    vl2r.v v12, (a3)
; CHECK-NEXT:    vl2r.v v14, (a0)
; CHECK-NEXT:    vmsne.vi v16, v8, 0
; CHECK-NEXT:    vmsne.vi v8, v10, 0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vmsne.vi v0, v14, 0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    vslideup.vx v0, v9, a1
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a2
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv1r.v v11, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmv.v.i v12, 0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
; ZVBB-NEXT:    slli a2, a1, 1
; ZVBB-NEXT:    vmv1r.v v0, v11
; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
; ZVBB-NEXT:    add a3, a0, a2
; ZVBB-NEXT:    vmv1r.v v0, v9
; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
; ZVBB-NEXT:    add a4, a3, a2
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vmerge.vim v20, v12, 1, v0
; ZVBB-NEXT:    add a2, a4, a2
; ZVBB-NEXT:    vsseg4e8.v v14, (a0)
; ZVBB-NEXT:    vl2r.v v8, (a2)
; ZVBB-NEXT:    srli a2, a1, 1
; ZVBB-NEXT:    srli a1, a1, 2
; ZVBB-NEXT:    vl2r.v v10, (a4)
; ZVBB-NEXT:    vl2r.v v12, (a3)
; ZVBB-NEXT:    vl2r.v v14, (a0)
; ZVBB-NEXT:    vmsne.vi v16, v8, 0
; ZVBB-NEXT:    vmsne.vi v8, v10, 0
; ZVBB-NEXT:    vmsne.vi v9, v12, 0
; ZVBB-NEXT:    vmsne.vi v0, v14, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; ZVBB-NEXT:    vslideup.vx v8, v16, a1
; ZVBB-NEXT:    vslideup.vx v0, v9, a1
; ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a2
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d)
  ret <vscale x 64 x i1> %res
}
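
; interleave4 uses the same segmented-store trick (vsseg4e8) with an
; 8 * vlenb frame for the four m2 sources; the four mask quarters are
; spliced back together with two mf2 vslideups and a final m1 vslideup.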

define <vscale x 64 x i8> @vector_interleave_nxv64i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d) nounwind {
;
; CHECK-LABEL: vector_interleave_nxv64i8_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vl2r.v v12, (a3)
; CHECK-NEXT:    add a1, a3, a1
; CHECK-NEXT:    vl2r.v v14, (a1)
; CHECK-NEXT:    vl2r.v v8, (a0)
; CHECK-NEXT:    vl2r.v v10, (a2)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    add a2, a0, a1
; ZVBB-NEXT:    vsetvli a3, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vsseg4e8.v v8, (a0)
; ZVBB-NEXT:    add a3, a2, a1
; ZVBB-NEXT:    vl2r.v v12, (a3)
; ZVBB-NEXT:    add a1, a3, a1
; ZVBB-NEXT:    vl2r.v v14, (a1)
; ZVBB-NEXT:    vl2r.v v8, (a0)
; ZVBB-NEXT:    vl2r.v v10, (a2)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x i8> @llvm.vector.interleave4.nxv64i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d)
  ret <vscale x 64 x i8> %res
}

define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d) nounwind {
; CHECK-LABEL: vector_interleave_nxv32i8_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsseg4e8.v v8, (a0)
; CHECK-NEXT:    vl1r.v v10, (a3)
; CHECK-NEXT:    add a1, a3, a1
; CHECK-NEXT:    vl1r.v v11, (a1)
; CHECK-NEXT:    vl1r.v v8, (a0)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv8i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 2
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    add a2, a0, a1
; ZVBB-NEXT:    add a3, a2, a1
; ZVBB-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vsseg4e8.v v8, (a0)
; ZVBB-NEXT:    vl1r.v v10, (a3)
; ZVBB-NEXT:    add a1, a3, a1
; ZVBB-NEXT:    vl1r.v v11, (a1)
; ZVBB-NEXT:    vl1r.v v8, (a0)
; ZVBB-NEXT:    vl1r.v v9, (a2)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 2
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.vector.interleave4.nxv32i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d)
  ret <vscale x 32 x i8> %res
}

define <vscale x 16 x i32> @vector_interleave_nxv16i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d) nounwind {
;
; CHECK-LABEL: vector_interleave_nxv16i32_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsseg4e32.v v8, (a0)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vl2re32.v v12, (a3)
; CHECK-NEXT:    add a1, a3, a1
; CHECK-NEXT:    vl2re32.v v14, (a1)
; CHECK-NEXT:    vl2re32.v v8, (a0)
; CHECK-NEXT:    vl2re32.v v10, (a2)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    add a2, a0, a1
; ZVBB-NEXT:    vsetvli a3, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vsseg4e32.v v8, (a0)
; ZVBB-NEXT:    add a3, a2, a1
; ZVBB-NEXT:    vl2re32.v v12, (a3)
; ZVBB-NEXT:    add a1, a3, a1
; ZVBB-NEXT:    vl2re32.v v14, (a1)
; ZVBB-NEXT:    vl2re32.v v8, (a0)
; ZVBB-NEXT:    vl2re32.v v10, (a2)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d)
  ret <vscale x 16 x i32> %res
}

define <vscale x 8 x i64> @vector_interleave_nxv8i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d) nounwind {
;
; CHECK-LABEL: vector_interleave_nxv8i64_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vsetvli a3, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsseg4e64.v v8, (a0)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vl2re64.v v12, (a3)
; CHECK-NEXT:    add a1, a3, a1
; CHECK-NEXT:    vl2re64.v v14, (a1)
; CHECK-NEXT:    vl2re64.v v8, (a0)
; CHECK-NEXT:    vl2re64.v v10, (a2)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    slli a1, a1, 1
; ZVBB-NEXT:    add a2, a0, a1
; ZVBB-NEXT:    vsetvli a3, zero, e64, m2, ta, ma
; ZVBB-NEXT:    vsseg4e64.v v8, (a0)
; ZVBB-NEXT:    add a3, a2, a1
; ZVBB-NEXT:    vl2re64.v v12, (a3)
; ZVBB-NEXT:    add a1, a3, a1
; ZVBB-NEXT:    vl2re64.v v14, (a1)
; ZVBB-NEXT:    vl2re64.v v8, (a0)
; ZVBB-NEXT:    vl2re64.v v10, (a2)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d)
  ret <vscale x 8 x i64> %res
}
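
; Note the reload order above: the segments are loaded into v8/v10/v12/v14
; so the four m2 results form one contiguous register group with no extra
; moves.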

define <vscale x 80 x i1> @vector_interleave_nxv80i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e) nounwind {
; CHECK-LABEL: vector_interleave_nxv80i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 10
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    addi a4, sp, 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a1, a0, 2
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    vmerge.vim v14, v12, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v18, v12, 1, v0
; CHECK-NEXT:    add a2, a4, a1
; CHECK-NEXT:    srli a3, a1, 1
; CHECK-NEXT:    vmv2r.v v20, v14
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
; CHECK-NEXT:    vmv1r.v v21, v18
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
; CHECK-NEXT:    vmv1r.v v22, v16
; CHECK-NEXT:    vmv1r.v v16, v19
; CHECK-NEXT:    add a5, a2, a1
; CHECK-NEXT:    vmv1r.v v23, v8
; CHECK-NEXT:    vmv1r.v v18, v9
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    vmerge.vim v24, v12, 1, v0
; CHECK-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v20, (a4)
; CHECK-NEXT:    vmv1r.v v19, v25
; CHECK-NEXT:    vsseg5e8.v v15, (a0)
; CHECK-NEXT:    vl1r.v v8, (a5)
; CHECK-NEXT:    add a5, a5, a1
; CHECK-NEXT:    vl1r.v v10, (a4)
; CHECK-NEXT:    add a4, a5, a1
; CHECK-NEXT:    vl1r.v v12, (a4)
; CHECK-NEXT:    add a4, a0, a1
; CHECK-NEXT:    vl1r.v v14, (a4)
; CHECK-NEXT:    add a4, a4, a1
; CHECK-NEXT:    vl1r.v v9, (a5)
; CHECK-NEXT:    add a5, a4, a1
; CHECK-NEXT:    vl1r.v v16, (a5)
; CHECK-NEXT:    add a5, a5, a1
; CHECK-NEXT:    srli a1, a1, 2
; CHECK-NEXT:    vl1r.v v11, (a2)
; CHECK-NEXT:    vl1r.v v15, (a4)
; CHECK-NEXT:    vl1r.v v13, (a0)
; CHECK-NEXT:    vl1r.v v17, (a5)
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v18, v8, 0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmsne.vi v8, v14, 0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v18, a1
; CHECK-NEXT:    vslideup.vx v9, v8, a1
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v9, a3
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v16, 0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 10
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv80i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 10
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv.v.i v12, 0
; ZVBB-NEXT:    addi a4, sp, 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a1, a0, 2
; ZVBB-NEXT:    add a0, a1, a0
; ZVBB-NEXT:    add a0, sp, a0
; ZVBB-NEXT:    addi a0, a0, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    vmerge.vim v14, v12, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v18, v12, 1, v0
; ZVBB-NEXT:    add a2, a4, a1
; ZVBB-NEXT:    srli a3, a1, 1
; ZVBB-NEXT:    vmv2r.v v20, v14
; ZVBB-NEXT:    vmv1r.v v0, v9
; ZVBB-NEXT:    vmerge.vim v16, v12, 1, v0
; ZVBB-NEXT:    vmv1r.v v21, v18
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vmerge.vim v8, v12, 1, v0
; ZVBB-NEXT:    vmv1r.v v22, v16
; ZVBB-NEXT:    vmv1r.v v16, v19
; ZVBB-NEXT:    add a5, a2, a1
; ZVBB-NEXT:    vmv1r.v v23, v8
; ZVBB-NEXT:    vmv1r.v v18, v9
; ZVBB-NEXT:    vmv1r.v v0, v11
; ZVBB-NEXT:    vmerge.vim v24, v12, 1, v0
; ZVBB-NEXT:    vsetvli a6, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vsseg5e8.v v20, (a4)
; ZVBB-NEXT:    vmv1r.v v19, v25
; ZVBB-NEXT:    vsseg5e8.v v15, (a0)
; ZVBB-NEXT:    vl1r.v v8, (a5)
; ZVBB-NEXT:    add a5, a5, a1
; ZVBB-NEXT:    vl1r.v v10, (a4)
; ZVBB-NEXT:    add a4, a5, a1
; ZVBB-NEXT:    vl1r.v v12, (a4)
; ZVBB-NEXT:    add a4, a0, a1
; ZVBB-NEXT:    vl1r.v v14, (a4)
; ZVBB-NEXT:    add a4, a4, a1
; ZVBB-NEXT:    vl1r.v v9, (a5)
; ZVBB-NEXT:    add a5, a4, a1
; ZVBB-NEXT:    vl1r.v v16, (a5)
; ZVBB-NEXT:    add a5, a5, a1
; ZVBB-NEXT:    srli a1, a1, 2
; ZVBB-NEXT:    vl1r.v v11, (a2)
; ZVBB-NEXT:    vl1r.v v15, (a4)
; ZVBB-NEXT:    vl1r.v v13, (a0)
; ZVBB-NEXT:    vl1r.v v17, (a5)
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmsne.vi v18, v8, 0
; ZVBB-NEXT:    vmsne.vi v0, v10, 0
; ZVBB-NEXT:    vmsne.vi v8, v14, 0
; ZVBB-NEXT:    vmsne.vi v9, v12, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v18, a1
; ZVBB-NEXT:    vslideup.vx v9, v8, a1
; ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vslideup.vx v0, v9, a3
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmsne.vi v8, v16, 0
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    li a1, 10
; ZVBB-NEXT:    mul a0, a0, a1
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 80 x i1> @llvm.vector.interleave5.nxv80i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e)
  ret <vscale x 80 x i1> %res
}

define <vscale x 80 x i8> @vector_interleave_nxv80i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e) nounwind {
;
; RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -80
; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT:    addi s0, sp, 80
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 28
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; RV32-NEXT:    vmv2r.v v20, v16
; RV32-NEXT:    addi a0, sp, 64
; RV32-NEXT:    vmv2r.v v18, v12
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a2, a1, 2
; RV32-NEXT:    add a1, a2, a1
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    addi a1, a1, 64
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    vmv2r.v v16, v8
; RV32-NEXT:    vmv2r.v v22, v16
; RV32-NEXT:    vmv2r.v v24, v18
; RV32-NEXT:    vmv1r.v v26, v20
; RV32-NEXT:    add a3, a0, a2
; RV32-NEXT:    vmv1r.v v23, v10
; RV32-NEXT:    add a4, a1, a2
; RV32-NEXT:    add a5, a4, a2
; RV32-NEXT:    vmv1r.v v25, v14
; RV32-NEXT:    add a6, a5, a2
; RV32-NEXT:    vmv1r.v v18, v11
; RV32-NEXT:    vsseg5e8.v v22, (a0)
; RV32-NEXT:    vmv1r.v v20, v15
; RV32-NEXT:    vsseg5e8.v v17, (a1)
; RV32-NEXT:    vl1r.v v16, (a6)
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1r.v v17, (a6)
; RV32-NEXT:    add a6, a3, a2
; RV32-NEXT:    vl1r.v v10, (a6)
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1r.v v11, (a6)
; RV32-NEXT:    vl1r.v v8, (a0)
; RV32-NEXT:    vl1r.v v9, (a3)
; RV32-NEXT:    vl1r.v v14, (a4)
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a3, 10
; RV32-NEXT:    mul a0, a0, a3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 64
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1r.v v15, (a5)
; RV32-NEXT:    vl1r.v v12, (a6)
; RV32-NEXT:    vl1r.v v13, (a1)
; RV32-NEXT:    slli a2, a2, 3
; RV32-NEXT:    add a2, a0, a2
; RV32-NEXT:    vs2r.v v16, (a2)
; RV32-NEXT:    vs8r.v v8, (a0)
; RV32-NEXT:    vl8r.v v16, (a2)
; RV32-NEXT:    vl8r.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -80
; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 80
; RV32-NEXT:    ret
;
; RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT:    addi s0, sp, 80
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 28
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; RV64-NEXT:    vmv2r.v v20, v16
; RV64-NEXT:    addi a0, sp, 64
; RV64-NEXT:    vmv2r.v v18, v12
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a2, a1, 2
; RV64-NEXT:    add a1, a2, a1
; RV64-NEXT:    add a1, sp, a1
; RV64-NEXT:    addi a1, a1, 64
; RV64-NEXT:    csrr a2, vlenb
; RV64-NEXT:    vmv2r.v v16, v8
; RV64-NEXT:    vmv2r.v v22, v16
; RV64-NEXT:    vmv2r.v v24, v18
; RV64-NEXT:    vmv1r.v v26, v20
; RV64-NEXT:    add a3, a0, a2
; RV64-NEXT:    vmv1r.v v23, v10
; RV64-NEXT:    add a4, a1, a2
; RV64-NEXT:    add a5, a4, a2
; RV64-NEXT:    vmv1r.v v25, v14
; RV64-NEXT:    add a6, a5, a2
; RV64-NEXT:    vmv1r.v v18, v11
; RV64-NEXT:    vsseg5e8.v v22, (a0)
; RV64-NEXT:    vmv1r.v v20, v15
; RV64-NEXT:    vsseg5e8.v v17, (a1)
; RV64-NEXT:    vl1r.v v16, (a6)
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1r.v v17, (a6)
; RV64-NEXT:    add a6, a3, a2
; RV64-NEXT:    vl1r.v v10, (a6)
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1r.v v11, (a6)
; RV64-NEXT:    vl1r.v v8, (a0)
; RV64-NEXT:    vl1r.v v9, (a3)
; RV64-NEXT:    vl1r.v v14, (a4)
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a3, 10
; RV64-NEXT:    mul a0, a0, a3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 64
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1r.v v15, (a5)
; RV64-NEXT:    vl1r.v v12, (a6)
; RV64-NEXT:    vl1r.v v13, (a1)
; RV64-NEXT:    slli a2, a2, 3
; RV64-NEXT:    add a2, a0, a2
; RV64-NEXT:    vs2r.v v16, (a2)
; RV64-NEXT:    vs8r.v v8, (a0)
; RV64-NEXT:    vl8r.v v16, (a2)
; RV64-NEXT:    vl8r.v v8, (a0)
; RV64-NEXT:    addi sp, s0, -80
; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv80i8_nxv16i8:
; ZVBB-RV32:       # %bb.0:
; ZVBB-RV32-NEXT:    addi sp, sp, -80
; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT:    addi s0, sp, 80
; ZVBB-RV32-NEXT:    csrr a0, vlenb
; ZVBB-RV32-NEXT:    li a1, 28
; ZVBB-RV32-NEXT:    mul a0, a0, a1
; ZVBB-RV32-NEXT:    sub sp, sp, a0
; ZVBB-RV32-NEXT:    andi sp, sp, -64
; ZVBB-RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
; ZVBB-RV32-NEXT:    addi a0, sp, 64
; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
; ZVBB-RV32-NEXT:    csrr a1, vlenb
; ZVBB-RV32-NEXT:    slli a2, a1, 2
; ZVBB-RV32-NEXT:    add a1, a2, a1
; ZVBB-RV32-NEXT:    add a1, sp, a1
; ZVBB-RV32-NEXT:    addi a1, a1, 64
; ZVBB-RV32-NEXT:    csrr a2, vlenb
; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
; ZVBB-RV32-NEXT:    add a3, a0, a2
; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
; ZVBB-RV32-NEXT:    add a4, a1, a2
; ZVBB-RV32-NEXT:    add a5, a4, a2
; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
; ZVBB-RV32-NEXT:    add a6, a5, a2
; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
; ZVBB-RV32-NEXT:    vsseg5e8.v v22, (a0)
; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
; ZVBB-RV32-NEXT:    vsseg5e8.v v17, (a1)
; ZVBB-RV32-NEXT:    vl1r.v v16, (a6)
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1r.v v17, (a6)
; ZVBB-RV32-NEXT:    add a6, a3, a2
; ZVBB-RV32-NEXT:    vl1r.v v10, (a6)
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1r.v v11, (a6)
; ZVBB-RV32-NEXT:    vl1r.v v8, (a0)
; ZVBB-RV32-NEXT:    vl1r.v v9, (a3)
; ZVBB-RV32-NEXT:    vl1r.v v14, (a4)
; ZVBB-RV32-NEXT:    csrr a0, vlenb
; ZVBB-RV32-NEXT:    li a3, 10
; ZVBB-RV32-NEXT:    mul a0, a0, a3
; ZVBB-RV32-NEXT:    add a0, sp, a0
; ZVBB-RV32-NEXT:    addi a0, a0, 64
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1r.v v15, (a5)
; ZVBB-RV32-NEXT:    vl1r.v v12, (a6)
; ZVBB-RV32-NEXT:    vl1r.v v13, (a1)
; ZVBB-RV32-NEXT:    slli a2, a2, 3
; ZVBB-RV32-NEXT:    add a2, a0, a2
; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
; ZVBB-RV32-NEXT:    vl8r.v v16, (a2)
; ZVBB-RV32-NEXT:    vl8r.v v8, (a0)
; ZVBB-RV32-NEXT:    addi sp, s0, -80
; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT:    addi sp, sp, 80
; ZVBB-RV32-NEXT:    ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv80i8_nxv16i8:
; ZVBB-RV64:       # %bb.0:
; ZVBB-RV64-NEXT:    addi sp, sp, -80
; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT:    addi s0, sp, 80
; ZVBB-RV64-NEXT:    csrr a0, vlenb
; ZVBB-RV64-NEXT:    li a1, 28
; ZVBB-RV64-NEXT:    mul a0, a0, a1
; ZVBB-RV64-NEXT:    sub sp, sp, a0
; ZVBB-RV64-NEXT:    andi sp, sp, -64
; ZVBB-RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
; ZVBB-RV64-NEXT:    addi a0, sp, 64
; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
; ZVBB-RV64-NEXT:    csrr a1, vlenb
; ZVBB-RV64-NEXT:    slli a2, a1, 2
; ZVBB-RV64-NEXT:    add a1, a2, a1
; ZVBB-RV64-NEXT:    add a1, sp, a1
; ZVBB-RV64-NEXT:    addi a1, a1, 64
; ZVBB-RV64-NEXT:    csrr a2, vlenb
; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
; ZVBB-RV64-NEXT:    add a3, a0, a2
; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
; ZVBB-RV64-NEXT:    add a4, a1, a2
; ZVBB-RV64-NEXT:    add a5, a4, a2
; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
; ZVBB-RV64-NEXT:    add a6, a5, a2
; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
; ZVBB-RV64-NEXT:    vsseg5e8.v v22, (a0)
; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
; ZVBB-RV64-NEXT:    vsseg5e8.v v17, (a1)
; ZVBB-RV64-NEXT:    vl1r.v v16, (a6)
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1r.v v17, (a6)
; ZVBB-RV64-NEXT:    add a6, a3, a2
; ZVBB-RV64-NEXT:    vl1r.v v10, (a6)
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1r.v v11, (a6)
; ZVBB-RV64-NEXT:    vl1r.v v8, (a0)
; ZVBB-RV64-NEXT:    vl1r.v v9, (a3)
; ZVBB-RV64-NEXT:    vl1r.v v14, (a4)
; ZVBB-RV64-NEXT:    csrr a0, vlenb
; ZVBB-RV64-NEXT:    li a3, 10
; ZVBB-RV64-NEXT:    mul a0, a0, a3
; ZVBB-RV64-NEXT:    add a0, sp, a0
; ZVBB-RV64-NEXT:    addi a0, a0, 64
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1r.v v15, (a5)
; ZVBB-RV64-NEXT:    vl1r.v v12, (a6)
; ZVBB-RV64-NEXT:    vl1r.v v13, (a1)
; ZVBB-RV64-NEXT:    slli a2, a2, 3
; ZVBB-RV64-NEXT:    add a2, a0, a2
; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
; ZVBB-RV64-NEXT:    vl8r.v v16, (a2)
; ZVBB-RV64-NEXT:    vl8r.v v8, (a0)
; ZVBB-RV64-NEXT:    addi sp, s0, -80
; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT:    addi sp, sp, 80
; ZVBB-RV64-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv80i8_nxv16i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    addi sp, sp, -80
; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT:    addi s0, sp, 80
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a1, 28
; ZIP-NEXT:    mul a0, a0, a1
; ZIP-NEXT:    sub sp, sp, a0
; ZIP-NEXT:    andi sp, sp, -64
; ZIP-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; ZIP-NEXT:    vmv2r.v v20, v16
; ZIP-NEXT:    addi a0, sp, 64
; ZIP-NEXT:    vmv2r.v v18, v12
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a2, a1, 2
; ZIP-NEXT:    add a1, a2, a1
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 64
; ZIP-NEXT:    csrr a2, vlenb
; ZIP-NEXT:    vmv2r.v v16, v8
; ZIP-NEXT:    vmv2r.v v22, v16
; ZIP-NEXT:    vmv2r.v v24, v18
; ZIP-NEXT:    vmv1r.v v26, v20
; ZIP-NEXT:    add a3, a0, a2
; ZIP-NEXT:    vmv1r.v v23, v10
; ZIP-NEXT:    add a4, a1, a2
; ZIP-NEXT:    add a5, a4, a2
; ZIP-NEXT:    vmv1r.v v25, v14
; ZIP-NEXT:    add a6, a5, a2
; ZIP-NEXT:    vmv1r.v v18, v11
; ZIP-NEXT:    vsseg5e8.v v22, (a0)
; ZIP-NEXT:    vmv1r.v v20, v15
; ZIP-NEXT:    vsseg5e8.v v17, (a1)
; ZIP-NEXT:    vl1r.v v16, (a6)
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1r.v v17, (a6)
; ZIP-NEXT:    add a6, a3, a2
; ZIP-NEXT:    vl1r.v v10, (a6)
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1r.v v11, (a6)
; ZIP-NEXT:    vl1r.v v8, (a0)
; ZIP-NEXT:    vl1r.v v9, (a3)
; ZIP-NEXT:    vl1r.v v14, (a4)
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a3, 10
; ZIP-NEXT:    mul a0, a0, a3
; ZIP-NEXT:    add a0, sp, a0
; ZIP-NEXT:    addi a0, a0, 64
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1r.v v15, (a5)
; ZIP-NEXT:    vl1r.v v12, (a6)
; ZIP-NEXT:    vl1r.v v13, (a1)
; ZIP-NEXT:    slli a2, a2, 3
; ZIP-NEXT:    add a2, a0, a2
; ZIP-NEXT:    vs2r.v v16, (a2)
; ZIP-NEXT:    vs8r.v v8, (a0)
; ZIP-NEXT:    vl8r.v v16, (a2)
; ZIP-NEXT:    vl8r.v v8, (a0)
; ZIP-NEXT:    addi sp, s0, -80
; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; ZIP-NEXT:    addi sp, sp, 80
; ZIP-NEXT:    ret
  %res = call <vscale x 80 x i8> @llvm.vector.interleave5.nxv80i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e)
  ret <vscale x 80 x i8> %res
}

define <vscale x 40 x i8> @vector_interleave_nxv40i8_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e) nounwind {
; CHECK-LABEL: vector_interleave_nxv40i8_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a1, a0, 2
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsseg5e8.v v8, (a0)
; CHECK-NEXT:    vl1r.v v10, (a3)
; CHECK-NEXT:    add a3, a3, a1
; CHECK-NEXT:    vl1r.v v11, (a3)
; CHECK-NEXT:    vl1r.v v8, (a0)
; CHECK-NEXT:    vl1r.v v9, (a2)
; CHECK-NEXT:    add a1, a3, a1
; CHECK-NEXT:    vl1r.v v12, (a1)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a1, a0, 2
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv40i8_nxv8i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a1, a0, 2
; ZVBB-NEXT:    add a0, a1, a0
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    add a2, a0, a1
; ZVBB-NEXT:    add a3, a2, a1
; ZVBB-NEXT:    vsetvli a4, zero, e8, m1, ta, ma
; ZVBB-NEXT:    vsseg5e8.v v8, (a0)
; ZVBB-NEXT:    vl1r.v v10, (a3)
; ZVBB-NEXT:    add a3, a3, a1
; ZVBB-NEXT:    vl1r.v v11, (a3)
; ZVBB-NEXT:    vl1r.v v8, (a0)
; ZVBB-NEXT:    vl1r.v v9, (a2)
; ZVBB-NEXT:    add a1, a3, a1
; ZVBB-NEXT:    vl1r.v v12, (a1)
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a1, a0, 2
; ZVBB-NEXT:    add a0, a1, a0
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 40 x i8> @llvm.vector.interleave5.nxv40i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d, <vscale x 8 x i8> %e)
  ret <vscale x 40 x i8> %res
}
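
; For m2 sources (the nxv80i8 case above and the nxv20i32 case below), five
; segments do not fit one segmented store, so the operands are regrouped
; into two five-tuples of m1 registers and written with two vsseg5e* stores
; into a 64-byte-aligned frame (28 * vlenb); the ten result registers are
; then compacted into v8 onward through one more store/reload pair
; (vs2r/vs8r followed by two vl8r loads).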

define <vscale x 20 x i32> @vector_interleave_nxv20i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e) nounwind {
;
; RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -80
; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT:    addi s0, sp, 80
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 28
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv2r.v v20, v16
; RV32-NEXT:    addi a0, sp, 64
; RV32-NEXT:    vmv2r.v v18, v12
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a2, a1, 2
; RV32-NEXT:    add a1, a2, a1
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    addi a1, a1, 64
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    vmv2r.v v16, v8
; RV32-NEXT:    vmv2r.v v22, v16
; RV32-NEXT:    vmv2r.v v24, v18
; RV32-NEXT:    vmv1r.v v26, v20
; RV32-NEXT:    add a3, a0, a2
; RV32-NEXT:    vmv1r.v v23, v10
; RV32-NEXT:    add a4, a1, a2
; RV32-NEXT:    add a5, a4, a2
; RV32-NEXT:    vmv1r.v v25, v14
; RV32-NEXT:    add a6, a5, a2
; RV32-NEXT:    vmv1r.v v18, v11
; RV32-NEXT:    vsseg5e32.v v22, (a0)
; RV32-NEXT:    vmv1r.v v20, v15
; RV32-NEXT:    vsseg5e32.v v17, (a1)
; RV32-NEXT:    vl1re32.v v16, (a6)
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1re32.v v17, (a6)
; RV32-NEXT:    add a6, a3, a2
; RV32-NEXT:    vl1re32.v v10, (a6)
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1re32.v v11, (a6)
; RV32-NEXT:    vl1re32.v v8, (a0)
; RV32-NEXT:    vl1re32.v v9, (a3)
; RV32-NEXT:    vl1re32.v v14, (a4)
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a3, 10
; RV32-NEXT:    mul a0, a0, a3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 64
; RV32-NEXT:    add a6, a6, a2
; RV32-NEXT:    vl1re32.v v15, (a5)
; RV32-NEXT:    vl1re32.v v12, (a6)
; RV32-NEXT:    vl1re32.v v13, (a1)
; RV32-NEXT:    slli a2, a2, 3
; RV32-NEXT:    add a2, a0, a2
; RV32-NEXT:    vs2r.v v16, (a2)
; RV32-NEXT:    vs8r.v v8, (a0)
; RV32-NEXT:    vl8re32.v v16, (a2)
; RV32-NEXT:    vl8re32.v v8, (a0)
; RV32-NEXT:    addi sp, s0, -80
; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 80
; RV32-NEXT:    ret
;
; RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT:    addi s0, sp, 80
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a1, 28
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT:    vmv2r.v v20, v16
; RV64-NEXT:    addi a0, sp, 64
; RV64-NEXT:    vmv2r.v v18, v12
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a2, a1, 2
; RV64-NEXT:    add a1, a2, a1
; RV64-NEXT:    add a1, sp, a1
; RV64-NEXT:    addi a1, a1, 64
; RV64-NEXT:    csrr a2, vlenb
; RV64-NEXT:    vmv2r.v v16, v8
; RV64-NEXT:    vmv2r.v v22, v16
; RV64-NEXT:    vmv2r.v v24, v18
; RV64-NEXT:    vmv1r.v v26, v20
; RV64-NEXT:    add a3, a0, a2
; RV64-NEXT:    vmv1r.v v23, v10
; RV64-NEXT:    add a4, a1, a2
; RV64-NEXT:    add a5, a4, a2
; RV64-NEXT:    vmv1r.v v25, v14
; RV64-NEXT:    add a6, a5, a2
; RV64-NEXT:    vmv1r.v v18, v11
; RV64-NEXT:    vsseg5e32.v v22, (a0)
; RV64-NEXT:    vmv1r.v v20, v15
; RV64-NEXT:    vsseg5e32.v v17, (a1)
; RV64-NEXT:    vl1re32.v v16, (a6)
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1re32.v v17, (a6)
; RV64-NEXT:    add a6, a3, a2
; RV64-NEXT:    vl1re32.v v10, (a6)
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1re32.v v11, (a6)
; RV64-NEXT:    vl1re32.v v8, (a0)
; RV64-NEXT:    vl1re32.v v9, (a3)
; RV64-NEXT:    vl1re32.v v14, (a4)
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    li a3, 10
; RV64-NEXT:    mul a0, a0, a3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 64
; RV64-NEXT:    add a6, a6, a2
; RV64-NEXT:    vl1re32.v v15, (a5)
; RV64-NEXT:    vl1re32.v v12, (a6)
; RV64-NEXT:    vl1re32.v v13, (a1)
; RV64-NEXT:    slli a2, a2, 3
; RV64-NEXT:    add a2, a0, a2
; RV64-NEXT:    vs2r.v v16, (a2)
; RV64-NEXT:    vs8r.v v8, (a0)
; RV64-NEXT:    vl8re32.v v16, (a2)
; RV64-NEXT:    vl8re32.v v8, (a0)
; RV64-NEXT:    addi sp, s0, -80
; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv20i32_nxv4i32:
; ZVBB-RV32:       # %bb.0:
; ZVBB-RV32-NEXT:    addi sp, sp, -80
; ZVBB-RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT:    addi s0, sp, 80
; ZVBB-RV32-NEXT:    csrr a0, vlenb
; ZVBB-RV32-NEXT:    li a1, 28
; ZVBB-RV32-NEXT:    mul a0, a0, a1
; ZVBB-RV32-NEXT:    sub sp, sp, a0
; ZVBB-RV32-NEXT:    andi sp, sp, -64
; ZVBB-RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; ZVBB-RV32-NEXT:    vmv2r.v v20, v16
; ZVBB-RV32-NEXT:    addi a0, sp, 64
; ZVBB-RV32-NEXT:    vmv2r.v v18, v12
; ZVBB-RV32-NEXT:    csrr a1, vlenb
; ZVBB-RV32-NEXT:    slli a2, a1, 2
; ZVBB-RV32-NEXT:    add a1, a2, a1
; ZVBB-RV32-NEXT:    add a1, sp, a1
; ZVBB-RV32-NEXT:    addi a1, a1, 64
; ZVBB-RV32-NEXT:    csrr a2, vlenb
; ZVBB-RV32-NEXT:    vmv2r.v v16, v8
; ZVBB-RV32-NEXT:    vmv2r.v v22, v16
; ZVBB-RV32-NEXT:    vmv2r.v v24, v18
; ZVBB-RV32-NEXT:    vmv1r.v v26, v20
; ZVBB-RV32-NEXT:    add a3, a0, a2
; ZVBB-RV32-NEXT:    vmv1r.v v23, v10
; ZVBB-RV32-NEXT:    add a4, a1, a2
; ZVBB-RV32-NEXT:    add a5, a4, a2
; ZVBB-RV32-NEXT:    vmv1r.v v25, v14
; ZVBB-RV32-NEXT:    add a6, a5, a2
; ZVBB-RV32-NEXT:    vmv1r.v v18, v11
; ZVBB-RV32-NEXT:    vsseg5e32.v v22, (a0)
; ZVBB-RV32-NEXT:    vmv1r.v v20, v15
; ZVBB-RV32-NEXT:    vsseg5e32.v v17, (a1)
; ZVBB-RV32-NEXT:    vl1re32.v v16, (a6)
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1re32.v v17, (a6)
; ZVBB-RV32-NEXT:    add a6, a3, a2
; ZVBB-RV32-NEXT:    vl1re32.v v10, (a6)
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1re32.v v11, (a6)
; ZVBB-RV32-NEXT:    vl1re32.v v8, (a0)
; ZVBB-RV32-NEXT:    vl1re32.v v9, (a3)
; ZVBB-RV32-NEXT:    vl1re32.v v14, (a4)
; ZVBB-RV32-NEXT:    csrr a0, vlenb
; ZVBB-RV32-NEXT:    li a3, 10
; ZVBB-RV32-NEXT:    mul a0, a0, a3
; ZVBB-RV32-NEXT:    add a0, sp, a0
; ZVBB-RV32-NEXT:    addi a0, a0, 64
; ZVBB-RV32-NEXT:    add a6, a6, a2
; ZVBB-RV32-NEXT:    vl1re32.v v15, (a5)
; ZVBB-RV32-NEXT:    vl1re32.v v12, (a6)
; ZVBB-RV32-NEXT:    vl1re32.v v13, (a1)
; ZVBB-RV32-NEXT:    slli a2, a2, 3
; ZVBB-RV32-NEXT:    add a2, a0, a2
; ZVBB-RV32-NEXT:    vs2r.v v16, (a2)
; ZVBB-RV32-NEXT:    vs8r.v v8, (a0)
; ZVBB-RV32-NEXT:    vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT:    vl8re32.v v8, (a0)
; ZVBB-RV32-NEXT:    addi sp, s0, -80
; ZVBB-RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT:    addi sp, sp, 80
; ZVBB-RV32-NEXT:    ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv20i32_nxv4i32:
; ZVBB-RV64:       # %bb.0:
; ZVBB-RV64-NEXT:    addi sp, sp, -80
; ZVBB-RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT:    addi s0, sp, 80
; ZVBB-RV64-NEXT:    csrr a0, vlenb
; ZVBB-RV64-NEXT:    li a1, 28
; ZVBB-RV64-NEXT:    mul a0, a0, a1
; ZVBB-RV64-NEXT:    sub sp, sp, a0
; ZVBB-RV64-NEXT:    andi sp, sp, -64
; ZVBB-RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; ZVBB-RV64-NEXT:    vmv2r.v v20, v16
; ZVBB-RV64-NEXT:    addi a0, sp, 64
; ZVBB-RV64-NEXT:    vmv2r.v v18, v12
; ZVBB-RV64-NEXT:    csrr a1, vlenb
; ZVBB-RV64-NEXT:    slli a2, a1, 2
; ZVBB-RV64-NEXT:    add a1, a2, a1
; ZVBB-RV64-NEXT:    add a1, sp, a1
; ZVBB-RV64-NEXT:    addi a1, a1, 64
; ZVBB-RV64-NEXT:    csrr a2, vlenb
; ZVBB-RV64-NEXT:    vmv2r.v v16, v8
; ZVBB-RV64-NEXT:    vmv2r.v v22, v16
; ZVBB-RV64-NEXT:    vmv2r.v v24, v18
; ZVBB-RV64-NEXT:    vmv1r.v v26, v20
; ZVBB-RV64-NEXT:    add a3, a0, a2
; ZVBB-RV64-NEXT:    vmv1r.v v23, v10
; ZVBB-RV64-NEXT:    add a4, a1, a2
; ZVBB-RV64-NEXT:    add a5, a4, a2
; ZVBB-RV64-NEXT:    vmv1r.v v25, v14
; ZVBB-RV64-NEXT:    add a6, a5, a2
; ZVBB-RV64-NEXT:    vmv1r.v v18, v11
; ZVBB-RV64-NEXT:    vsseg5e32.v v22, (a0)
; ZVBB-RV64-NEXT:    vmv1r.v v20, v15
; ZVBB-RV64-NEXT:    vsseg5e32.v v17, (a1)
; ZVBB-RV64-NEXT:    vl1re32.v v16, (a6)
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1re32.v v17, (a6)
; ZVBB-RV64-NEXT:    add a6, a3, a2
; ZVBB-RV64-NEXT:    vl1re32.v v10, (a6)
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1re32.v v11, (a6)
; ZVBB-RV64-NEXT:    vl1re32.v v8, (a0)
; ZVBB-RV64-NEXT:    vl1re32.v v9, (a3)
; ZVBB-RV64-NEXT:    vl1re32.v v14, (a4)
; ZVBB-RV64-NEXT:    csrr a0, vlenb
; ZVBB-RV64-NEXT:    li a3, 10
; ZVBB-RV64-NEXT:    mul a0, a0, a3
; ZVBB-RV64-NEXT:    add a0, sp, a0
; ZVBB-RV64-NEXT:    addi a0, a0, 64
; ZVBB-RV64-NEXT:    add a6, a6, a2
; ZVBB-RV64-NEXT:    vl1re32.v v15, (a5)
; ZVBB-RV64-NEXT:    vl1re32.v v12, (a6)
; ZVBB-RV64-NEXT:    vl1re32.v v13, (a1)
; ZVBB-RV64-NEXT:    slli a2, a2, 3
; ZVBB-RV64-NEXT:    add a2, a0, a2
; ZVBB-RV64-NEXT:    vs2r.v v16, (a2)
; ZVBB-RV64-NEXT:    vs8r.v v8, (a0)
; ZVBB-RV64-NEXT:    vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT:    vl8re32.v v8, (a0)
; ZVBB-RV64-NEXT:    addi sp, s0, -80
; ZVBB-RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT:    addi sp, sp, 80
; ZVBB-RV64-NEXT:    ret
;
; ZIP-LABEL: vector_interleave_nxv20i32_nxv4i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    addi sp, sp, -80
; ZIP-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT:    addi s0, sp, 80
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a1, 28
; ZIP-NEXT:    mul a0, a0, a1
; ZIP-NEXT:    sub sp, sp, a0
; ZIP-NEXT:    andi sp, sp, -64
; ZIP-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; ZIP-NEXT:    vmv2r.v v20, v16
; ZIP-NEXT:    addi a0, sp, 64
; ZIP-NEXT:    vmv2r.v v18, v12
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a2, a1, 2
; ZIP-NEXT:    add a1, a2, a1
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 64
; ZIP-NEXT:    csrr a2, vlenb
; ZIP-NEXT:    vmv2r.v v16, v8
; ZIP-NEXT:    vmv2r.v v22, v16
; ZIP-NEXT:    vmv2r.v v24, v18
; ZIP-NEXT:    vmv1r.v v26, v20
; ZIP-NEXT:    add a3, a0, a2
; ZIP-NEXT:    vmv1r.v v23, v10
; ZIP-NEXT:    add a4, a1, a2
; ZIP-NEXT:    add a5, a4, a2
; ZIP-NEXT:    vmv1r.v v25, v14
; ZIP-NEXT:    add a6, a5, a2
; ZIP-NEXT:    vmv1r.v v18, v11
; ZIP-NEXT:    vsseg5e32.v v22, (a0)
; ZIP-NEXT:    vmv1r.v v20, v15
; ZIP-NEXT:    vsseg5e32.v v17, (a1)
; ZIP-NEXT:    vl1re32.v v16, (a6)
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1re32.v v17, (a6)
; ZIP-NEXT:    add a6, a3, a2
; ZIP-NEXT:    vl1re32.v v10, (a6)
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1re32.v v11, (a6)
; ZIP-NEXT:    vl1re32.v v8, (a0)
; ZIP-NEXT:    vl1re32.v v9, (a3)
; ZIP-NEXT:    vl1re32.v v14, (a4)
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a3, 10
; ZIP-NEXT:    mul a0, a0, a3
; ZIP-NEXT:    add a0, sp, a0
; ZIP-NEXT:    addi a0, a0, 64
; ZIP-NEXT:    add a6, a6, a2
; ZIP-NEXT:    vl1re32.v v15, (a5)
; ZIP-NEXT:    vl1re32.v v12, (a6)
; ZIP-NEXT:    vl1re32.v v13, (a1)
; ZIP-NEXT:    slli a2, a2, 3
; ZIP-NEXT:    add a2, a0, a2
; ZIP-NEXT:    vs2r.v v16, (a2)
; ZIP-NEXT:    vs8r.v v8, (a0)
; ZIP-NEXT:    vl8re32.v v16, (a2)
; ZIP-NEXT:    vl8re32.v v8, (a0)
; ZIP-NEXT:    addi sp, s0, -80
; ZIP-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave5.nxv20i32( %a, %b, %c, %d, %e) ret %res } define @vector_interleave_nxv10i64_nxv2i64( %a, %b, %c, %d, %e) nounwind { ; ; RV32-LABEL: vector_interleave_nxv10i64_nxv2i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 2 ; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v16, v8 ; RV32-NEXT: vmv2r.v v22, v16 ; RV32-NEXT: vmv2r.v v24, v18 ; RV32-NEXT: vmv1r.v v26, v20 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v23, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 ; RV32-NEXT: vsseg5e64.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 ; RV32-NEXT: vsseg5e64.v v17, (a1) ; RV32-NEXT: vl1re64.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re64.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v11, (a6) ; RV32-NEXT: vl1re64.v v8, (a0) ; RV32-NEXT: vl1re64.v v9, (a3) ; RV32-NEXT: vl1re64.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v15, (a5) ; RV32-NEXT: vl1re64.v v12, (a6) ; RV32-NEXT: vl1re64.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re64.v v16, (a2) ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv10i64_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 2 ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v16, v8 ; RV64-NEXT: vmv2r.v v22, v16 ; RV64-NEXT: vmv2r.v v24, v18 ; RV64-NEXT: vmv1r.v v26, v20 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v23, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 ; RV64-NEXT: vsseg5e64.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 ; RV64-NEXT: vsseg5e64.v v17, (a1) ; RV64-NEXT: vl1re64.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v17, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re64.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; 
RV64-NEXT: vl1re64.v v11, (a6) ; RV64-NEXT: vl1re64.v v8, (a0) ; RV64-NEXT: vl1re64.v v9, (a3) ; RV64-NEXT: vl1re64.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v15, (a5) ; RV64-NEXT: vl1re64.v v12, (a6) ; RV64-NEXT: vl1re64.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re64.v v16, (a2) ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv10i64_nxv2i64: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 2 ; ZVBB-RV32-NEXT: add a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v16, v8 ; ZVBB-RV32-NEXT: vmv2r.v v22, v16 ; ZVBB-RV32-NEXT: vmv2r.v v24, v18 ; ZVBB-RV32-NEXT: vmv1r.v v26, v20 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v23, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 ; ZVBB-RV32-NEXT: vsseg5e64.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 ; ZVBB-RV32-NEXT: vsseg5e64.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re64.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv10i64_nxv2i64: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, 
m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 2 ; ZVBB-RV64-NEXT: add a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v16, v8 ; ZVBB-RV64-NEXT: vmv2r.v v22, v16 ; ZVBB-RV64-NEXT: vmv2r.v v24, v18 ; ZVBB-RV64-NEXT: vmv1r.v v26, v20 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v23, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 ; ZVBB-RV64-NEXT: vsseg5e64.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 ; ZVBB-RV64-NEXT: vsseg5e64.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re64.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vs2r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv10i64_nxv2i64: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v16 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v18, v12 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 2 ; ZIP-NEXT: add a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v16, v8 ; ZIP-NEXT: vmv2r.v v22, v16 ; ZIP-NEXT: vmv2r.v v24, v18 ; ZIP-NEXT: vmv1r.v v26, v20 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v23, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vmv1r.v v25, v14 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v18, v11 ; ZIP-NEXT: vsseg5e64.v v22, (a0) ; ZIP-NEXT: vmv1r.v v20, v15 ; ZIP-NEXT: vsseg5e64.v v17, (a1) ; ZIP-NEXT: vl1re64.v v16, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v17, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re64.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v11, (a6) ; ZIP-NEXT: vl1re64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v9, (a3) ; ZIP-NEXT: vl1re64.v v14, (a4) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 10 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v15, (a5) ; ZIP-NEXT: vl1re64.v v12, (a6) ; ZIP-NEXT: vl1re64.v v13, (a1) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; 
ZIP-NEXT: vs2r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re64.v v16, (a2) ; ZIP-NEXT: vl8re64.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave5.nxv10i64( %a, %b, %c, %d, %e) ret %res } define @vector_interleave_nxv96i1_nxv16i1( %a, %b, %c, %d, %e, %f) nounwind { ; CHECK-LABEL: vector_interleave_nxv96i1_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 12 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v20, 0 ; CHECK-NEXT: vmerge.vim v14, v20, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v22, v20, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmv1r.v v16, v23 ; CHECK-NEXT: vmerge.vim v8, v20, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vmv1r.v v17, v9 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmerge.vim v24, v20, 1, v0 ; CHECK-NEXT: addi a4, sp, 16 ; CHECK-NEXT: vmv1r.v v18, v25 ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: vmerge.vim v26, v20, 1, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vmv1r.v v19, v27 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmerge.vim v10, v20, 1, v0 ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: vmv1r.v v20, v11 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v15, (a0) ; CHECK-NEXT: vmv1r.v v15, v22 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: srli a3, a1, 1 ; CHECK-NEXT: vmv1r.v v17, v24 ; CHECK-NEXT: add a6, a5, a1 ; CHECK-NEXT: vmv1r.v v18, v26 ; CHECK-NEXT: add a7, a2, a1 ; CHECK-NEXT: vmv1r.v v19, v10 ; CHECK-NEXT: vsseg6e8.v v14, (a4) ; CHECK-NEXT: vl1r.v v8, (a0) ; CHECK-NEXT: add a0, a6, a1 ; CHECK-NEXT: vl1r.v v10, (a6) ; CHECK-NEXT: add a6, a7, a1 ; CHECK-NEXT: vl1r.v v12, (a4) ; CHECK-NEXT: add a4, a0, a1 ; CHECK-NEXT: vl1r.v v14, (a7) ; CHECK-NEXT: add a7, a6, a1 ; CHECK-NEXT: vl1r.v v16, (a4) ; CHECK-NEXT: add a4, a4, a1 ; CHECK-NEXT: vl1r.v v18, (a7) ; CHECK-NEXT: add a7, a7, a1 ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vl1r.v v9, (a2) ; CHECK-NEXT: vl1r.v v17, (a4) ; CHECK-NEXT: vl1r.v v11, (a0) ; CHECK-NEXT: vl1r.v v13, (a5) ; CHECK-NEXT: vl1r.v v19, (a7) ; CHECK-NEXT: vl1r.v v15, (a6) ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vi v20, v8, 0 ; CHECK-NEXT: vmsne.vi v9, v16, 0 ; CHECK-NEXT: vmsne.vi v16, v10, 0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmsne.vi v10, v18, 0 ; CHECK-NEXT: vmsne.vi v8, v14, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v9, v20, a1 ; CHECK-NEXT: vslideup.vx v0, v16, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vx v0, v9, a3 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 12 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv96i1_nxv16i1: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 12 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmv.v.i v20, 0 ; ZVBB-NEXT: vmerge.vim v14, v20, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v8 ; ZVBB-NEXT: vmerge.vim v22, v20, 
1, v0 ; ZVBB-NEXT: vmv1r.v v0, v9 ; ZVBB-NEXT: vmv1r.v v16, v23 ; ZVBB-NEXT: vmerge.vim v8, v20, 1, v0 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add a0, sp, a0 ; ZVBB-NEXT: addi a0, a0, 16 ; ZVBB-NEXT: vmv1r.v v17, v9 ; ZVBB-NEXT: vmv1r.v v0, v10 ; ZVBB-NEXT: vmerge.vim v24, v20, 1, v0 ; ZVBB-NEXT: addi a4, sp, 16 ; ZVBB-NEXT: vmv1r.v v18, v25 ; ZVBB-NEXT: vmv1r.v v0, v11 ; ZVBB-NEXT: vmerge.vim v26, v20, 1, v0 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vmv1r.v v19, v27 ; ZVBB-NEXT: vmv1r.v v0, v12 ; ZVBB-NEXT: vmerge.vim v10, v20, 1, v0 ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: vmv1r.v v20, v11 ; ZVBB-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; ZVBB-NEXT: vsseg6e8.v v15, (a0) ; ZVBB-NEXT: vmv1r.v v15, v22 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: vmv1r.v v16, v8 ; ZVBB-NEXT: srli a3, a1, 1 ; ZVBB-NEXT: vmv1r.v v17, v24 ; ZVBB-NEXT: add a6, a5, a1 ; ZVBB-NEXT: vmv1r.v v18, v26 ; ZVBB-NEXT: add a7, a2, a1 ; ZVBB-NEXT: vmv1r.v v19, v10 ; ZVBB-NEXT: vsseg6e8.v v14, (a4) ; ZVBB-NEXT: vl1r.v v8, (a0) ; ZVBB-NEXT: add a0, a6, a1 ; ZVBB-NEXT: vl1r.v v10, (a6) ; ZVBB-NEXT: add a6, a7, a1 ; ZVBB-NEXT: vl1r.v v12, (a4) ; ZVBB-NEXT: add a4, a0, a1 ; ZVBB-NEXT: vl1r.v v14, (a7) ; ZVBB-NEXT: add a7, a6, a1 ; ZVBB-NEXT: vl1r.v v16, (a4) ; ZVBB-NEXT: add a4, a4, a1 ; ZVBB-NEXT: vl1r.v v18, (a7) ; ZVBB-NEXT: add a7, a7, a1 ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vl1r.v v9, (a2) ; ZVBB-NEXT: vl1r.v v17, (a4) ; ZVBB-NEXT: vl1r.v v11, (a0) ; ZVBB-NEXT: vl1r.v v13, (a5) ; ZVBB-NEXT: vl1r.v v19, (a7) ; ZVBB-NEXT: vl1r.v v15, (a6) ; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmsne.vi v20, v8, 0 ; ZVBB-NEXT: vmsne.vi v9, v16, 0 ; ZVBB-NEXT: vmsne.vi v16, v10, 0 ; ZVBB-NEXT: vmsne.vi v0, v12, 0 ; ZVBB-NEXT: vmsne.vi v10, v18, 0 ; ZVBB-NEXT: vmsne.vi v8, v14, 0 ; ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v20, a1 ; ZVBB-NEXT: vslideup.vx v0, v16, a1 ; ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v0, v9, a3 ; ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 12 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave6.nxv96i1( %a, %b, %c, %d, %e, %f) ret %res } define @vector_interleave_nxv96i8_nxv16i8( %a, %b, %c, %d, %e, %f) nounwind { ; ; RV32-LABEL: vector_interleave_nxv96i8_nxv16i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v14 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: vmv2r.v v24, v10 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a0, 6 ; RV32-NEXT: mul a1, a1, a0 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv1r.v v10, v25 ; RV32-NEXT: vmv1r.v v11, v23 ; RV32-NEXT: vmv1r.v v12, v21 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv1r.v v13, v17 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv1r.v v14, v19 ; RV32-NEXT: vsseg6e8.v v9, (a1) ; RV32-NEXT: vmv1r.v v9, v24 ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vmv1r.v v10, v22 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v11, v20 ; RV32-NEXT: add a4, a3, a2 ; RV32-NEXT: vmv1r.v v12, v16 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v13, 
v18 ; RV32-NEXT: vsseg6e8.v v8, (a0) ; RV32-NEXT: vl1r.v v14, (a1) ; RV32-NEXT: add a1, a6, a2 ; RV32-NEXT: vl1r.v v15, (a5) ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vl1r.v v18, (a5) ; RV32-NEXT: add a5, a5, a2 ; RV32-NEXT: vl1r.v v19, (a5) ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vl1r.v v16, (a6) ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vl1r.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v13, (a6) ; RV32-NEXT: csrr a6, vlenb ; RV32-NEXT: li a7, 12 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 64 ; RV32-NEXT: vl1r.v v17, (a1) ; RV32-NEXT: vl1r.v v10, (a4) ; RV32-NEXT: vl1r.v v11, (a5) ; RV32-NEXT: vl1r.v v8, (a0) ; RV32-NEXT: vl1r.v v9, (a3) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a6, a2 ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a6) ; RV32-NEXT: vl8r.v v16, (a2) ; RV32-NEXT: vl8r.v v8, (a6) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv96i8_nxv16i8: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v14 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: vmv2r.v v24, v10 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: li a0, 6 ; RV64-NEXT: mul a1, a1, a0 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv1r.v v10, v25 ; RV64-NEXT: vmv1r.v v11, v23 ; RV64-NEXT: vmv1r.v v12, v21 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv1r.v v13, v17 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv1r.v v14, v19 ; RV64-NEXT: vsseg6e8.v v9, (a1) ; RV64-NEXT: vmv1r.v v9, v24 ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vmv1r.v v10, v22 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v11, v20 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: vmv1r.v v12, v16 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v13, v18 ; RV64-NEXT: vsseg6e8.v v8, (a0) ; RV64-NEXT: vl1r.v v14, (a1) ; RV64-NEXT: add a1, a6, a2 ; RV64-NEXT: vl1r.v v15, (a5) ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vl1r.v v18, (a5) ; RV64-NEXT: add a5, a5, a2 ; RV64-NEXT: vl1r.v v19, (a5) ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vl1r.v v16, (a6) ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vl1r.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v13, (a6) ; RV64-NEXT: csrr a6, vlenb ; RV64-NEXT: li a7, 12 ; RV64-NEXT: mul a6, a6, a7 ; RV64-NEXT: add a6, sp, a6 ; RV64-NEXT: addi a6, a6, 64 ; RV64-NEXT: vl1r.v v17, (a1) ; RV64-NEXT: vl1r.v v10, (a4) ; RV64-NEXT: vl1r.v v11, (a5) ; RV64-NEXT: vl1r.v v8, (a0) ; RV64-NEXT: vl1r.v v9, (a3) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a6, a2 ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a6) ; RV64-NEXT: vl8r.v v16, (a2) ; RV64-NEXT: vl8r.v v8, (a6) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv96i8_nxv16i8: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; 
ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v14 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: vmv2r.v v24, v10 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: li a0, 6 ; ZVBB-RV32-NEXT: mul a1, a1, a0 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv1r.v v10, v25 ; ZVBB-RV32-NEXT: vmv1r.v v11, v23 ; ZVBB-RV32-NEXT: vmv1r.v v12, v21 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv1r.v v13, v17 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv1r.v v14, v19 ; ZVBB-RV32-NEXT: vsseg6e8.v v9, (a1) ; ZVBB-RV32-NEXT: vmv1r.v v9, v24 ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vmv1r.v v10, v22 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v11, v20 ; ZVBB-RV32-NEXT: add a4, a3, a2 ; ZVBB-RV32-NEXT: vmv1r.v v12, v16 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v13, v18 ; ZVBB-RV32-NEXT: vsseg6e8.v v8, (a0) ; ZVBB-RV32-NEXT: vl1r.v v14, (a1) ; ZVBB-RV32-NEXT: add a1, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v15, (a5) ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vl1r.v v18, (a5) ; ZVBB-RV32-NEXT: add a5, a5, a2 ; ZVBB-RV32-NEXT: vl1r.v v19, (a5) ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vl1r.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vl1r.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v13, (a6) ; ZVBB-RV32-NEXT: csrr a6, vlenb ; ZVBB-RV32-NEXT: li a7, 12 ; ZVBB-RV32-NEXT: mul a6, a6, a7 ; ZVBB-RV32-NEXT: add a6, sp, a6 ; ZVBB-RV32-NEXT: addi a6, a6, 64 ; ZVBB-RV32-NEXT: vl1r.v v17, (a1) ; ZVBB-RV32-NEXT: vl1r.v v10, (a4) ; ZVBB-RV32-NEXT: vl1r.v v11, (a5) ; ZVBB-RV32-NEXT: vl1r.v v8, (a0) ; ZVBB-RV32-NEXT: vl1r.v v9, (a3) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a6, a2 ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a6) ; ZVBB-RV32-NEXT: vl8r.v v16, (a2) ; ZVBB-RV32-NEXT: vl8r.v v8, (a6) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv96i8_nxv16i8: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v14 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: vmv2r.v v24, v10 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: li a0, 6 ; ZVBB-RV64-NEXT: mul a1, a1, a0 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv1r.v v10, v25 ; ZVBB-RV64-NEXT: vmv1r.v v11, v23 ; ZVBB-RV64-NEXT: vmv1r.v v12, v21 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv1r.v v13, v17 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv1r.v v14, v19 ; ZVBB-RV64-NEXT: vsseg6e8.v v9, (a1) ; ZVBB-RV64-NEXT: vmv1r.v v9, v24 ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vmv1r.v v10, v22 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v11, v20 ; ZVBB-RV64-NEXT: add a4, a3, a2 ; ZVBB-RV64-NEXT: vmv1r.v v12, v16 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v13, v18 ; 
ZVBB-RV64-NEXT: vsseg6e8.v v8, (a0) ; ZVBB-RV64-NEXT: vl1r.v v14, (a1) ; ZVBB-RV64-NEXT: add a1, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v15, (a5) ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vl1r.v v18, (a5) ; ZVBB-RV64-NEXT: add a5, a5, a2 ; ZVBB-RV64-NEXT: vl1r.v v19, (a5) ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vl1r.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vl1r.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v13, (a6) ; ZVBB-RV64-NEXT: csrr a6, vlenb ; ZVBB-RV64-NEXT: li a7, 12 ; ZVBB-RV64-NEXT: mul a6, a6, a7 ; ZVBB-RV64-NEXT: add a6, sp, a6 ; ZVBB-RV64-NEXT: addi a6, a6, 64 ; ZVBB-RV64-NEXT: vl1r.v v17, (a1) ; ZVBB-RV64-NEXT: vl1r.v v10, (a4) ; ZVBB-RV64-NEXT: vl1r.v v11, (a5) ; ZVBB-RV64-NEXT: vl1r.v v8, (a0) ; ZVBB-RV64-NEXT: vl1r.v v9, (a3) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a6, a2 ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a6) ; ZVBB-RV64-NEXT: vl8r.v v16, (a2) ; ZVBB-RV64-NEXT: vl8r.v v8, (a6) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv96i8_nxv16i8: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v14 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: vmv2r.v v24, v10 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: li a0, 6 ; ZIP-NEXT: mul a1, a1, a0 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv1r.v v10, v25 ; ZIP-NEXT: vmv1r.v v11, v23 ; ZIP-NEXT: vmv1r.v v12, v21 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv1r.v v13, v17 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv1r.v v14, v19 ; ZIP-NEXT: vsseg6e8.v v9, (a1) ; ZIP-NEXT: vmv1r.v v9, v24 ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vmv1r.v v10, v22 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v11, v20 ; ZIP-NEXT: add a4, a3, a2 ; ZIP-NEXT: vmv1r.v v12, v16 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v13, v18 ; ZIP-NEXT: vsseg6e8.v v8, (a0) ; ZIP-NEXT: vl1r.v v14, (a1) ; ZIP-NEXT: add a1, a6, a2 ; ZIP-NEXT: vl1r.v v15, (a5) ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vl1r.v v18, (a5) ; ZIP-NEXT: add a5, a5, a2 ; ZIP-NEXT: vl1r.v v19, (a5) ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vl1r.v v16, (a6) ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vl1r.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v13, (a6) ; ZIP-NEXT: csrr a6, vlenb ; ZIP-NEXT: li a7, 12 ; ZIP-NEXT: mul a6, a6, a7 ; ZIP-NEXT: add a6, sp, a6 ; ZIP-NEXT: addi a6, a6, 64 ; ZIP-NEXT: vl1r.v v17, (a1) ; ZIP-NEXT: vl1r.v v10, (a4) ; ZIP-NEXT: vl1r.v v11, (a5) ; ZIP-NEXT: vl1r.v v8, (a0) ; ZIP-NEXT: vl1r.v v9, (a3) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a6, a2 ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a6) ; ZIP-NEXT: vl8r.v v16, (a2) ; ZIP-NEXT: vl8r.v v8, (a6) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave6.nxv96i8( %a, %b, %c, %d, %e, %f) ret %res } define @vector_interleave_nxv48i8_nxv8i8( %a, %b, %c, %d, %e, %f) nounwind { ; CHECK-LABEL: vector_interleave_nxv48i8_nxv8i8: ; CHECK: # %bb.0: ; 
CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: vl1r.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1r.v v11, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1r.v v8, (a0) ; CHECK-NEXT: vl1r.v v9, (a2) ; CHECK-NEXT: vl1r.v v12, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1r.v v13, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv48i8_nxv8i8: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e8, m1, ta, ma ; ZVBB-NEXT: vsseg6e8.v v8, (a0) ; ZVBB-NEXT: vl1r.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1r.v v11, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1r.v v8, (a0) ; ZVBB-NEXT: vl1r.v v9, (a2) ; ZVBB-NEXT: vl1r.v v12, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1r.v v13, (a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave6.nxv48i8( %a, %b, %c, %d, %e, %f) ret %res } define @vector_interleave_nxv24i32_nxv4i32( %a, %b, %c, %d, %e, %f) nounwind { ; ; RV32-LABEL: vector_interleave_nxv24i32_nxv4i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v14 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: vmv2r.v v24, v10 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a0, 6 ; RV32-NEXT: mul a1, a1, a0 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv1r.v v10, v25 ; RV32-NEXT: vmv1r.v v11, v23 ; RV32-NEXT: vmv1r.v v12, v21 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv1r.v v13, v17 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv1r.v v14, v19 ; RV32-NEXT: vsseg6e32.v v9, (a1) ; RV32-NEXT: vmv1r.v v9, v24 ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vmv1r.v v10, v22 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v11, v20 ; RV32-NEXT: add a4, a3, a2 ; RV32-NEXT: vmv1r.v v12, v16 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v13, v18 ; RV32-NEXT: vsseg6e32.v v8, (a0) ; RV32-NEXT: vl1re32.v v14, (a1) ; RV32-NEXT: add a1, a6, a2 ; RV32-NEXT: vl1re32.v v15, (a5) ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vl1re32.v v18, (a5) ; RV32-NEXT: add a5, a5, a2 ; RV32-NEXT: vl1re32.v v19, (a5) ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vl1re32.v v16, (a6) ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vl1re32.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v13, (a6) ; RV32-NEXT: csrr a6, vlenb ; RV32-NEXT: li a7, 12 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 64 ; RV32-NEXT: vl1re32.v v17, (a1) ; RV32-NEXT: vl1re32.v v10, (a4) ; RV32-NEXT: vl1re32.v v11, (a5) ; RV32-NEXT: 
vl1re32.v v8, (a0) ; RV32-NEXT: vl1re32.v v9, (a3) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a6, a2 ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a6) ; RV32-NEXT: vl8re32.v v16, (a2) ; RV32-NEXT: vl8re32.v v8, (a6) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv24i32_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v14 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: vmv2r.v v24, v10 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: li a0, 6 ; RV64-NEXT: mul a1, a1, a0 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv1r.v v10, v25 ; RV64-NEXT: vmv1r.v v11, v23 ; RV64-NEXT: vmv1r.v v12, v21 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv1r.v v13, v17 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv1r.v v14, v19 ; RV64-NEXT: vsseg6e32.v v9, (a1) ; RV64-NEXT: vmv1r.v v9, v24 ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vmv1r.v v10, v22 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v11, v20 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: vmv1r.v v12, v16 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v13, v18 ; RV64-NEXT: vsseg6e32.v v8, (a0) ; RV64-NEXT: vl1re32.v v14, (a1) ; RV64-NEXT: add a1, a6, a2 ; RV64-NEXT: vl1re32.v v15, (a5) ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vl1re32.v v18, (a5) ; RV64-NEXT: add a5, a5, a2 ; RV64-NEXT: vl1re32.v v19, (a5) ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vl1re32.v v16, (a6) ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vl1re32.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v13, (a6) ; RV64-NEXT: csrr a6, vlenb ; RV64-NEXT: li a7, 12 ; RV64-NEXT: mul a6, a6, a7 ; RV64-NEXT: add a6, sp, a6 ; RV64-NEXT: addi a6, a6, 64 ; RV64-NEXT: vl1re32.v v17, (a1) ; RV64-NEXT: vl1re32.v v10, (a4) ; RV64-NEXT: vl1re32.v v11, (a5) ; RV64-NEXT: vl1re32.v v8, (a0) ; RV64-NEXT: vl1re32.v v9, (a3) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a6, a2 ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a6) ; RV64-NEXT: vl8re32.v v16, (a2) ; RV64-NEXT: vl8re32.v v8, (a6) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv24i32_nxv4i32: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v14 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: vmv2r.v v24, v10 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: li a0, 6 ; ZVBB-RV32-NEXT: mul a1, a1, a0 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv1r.v v10, v25 ; ZVBB-RV32-NEXT: vmv1r.v v11, v23 ; ZVBB-RV32-NEXT: vmv1r.v v12, v21 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv1r.v v13, v17 ; 
ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv1r.v v14, v19 ; ZVBB-RV32-NEXT: vsseg6e32.v v9, (a1) ; ZVBB-RV32-NEXT: vmv1r.v v9, v24 ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vmv1r.v v10, v22 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v11, v20 ; ZVBB-RV32-NEXT: add a4, a3, a2 ; ZVBB-RV32-NEXT: vmv1r.v v12, v16 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v13, v18 ; ZVBB-RV32-NEXT: vsseg6e32.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re32.v v14, (a1) ; ZVBB-RV32-NEXT: add a1, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v15, (a5) ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vl1re32.v v18, (a5) ; ZVBB-RV32-NEXT: add a5, a5, a2 ; ZVBB-RV32-NEXT: vl1re32.v v19, (a5) ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vl1re32.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV32-NEXT: csrr a6, vlenb ; ZVBB-RV32-NEXT: li a7, 12 ; ZVBB-RV32-NEXT: mul a6, a6, a7 ; ZVBB-RV32-NEXT: add a6, sp, a6 ; ZVBB-RV32-NEXT: addi a6, a6, 64 ; ZVBB-RV32-NEXT: vl1re32.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re32.v v10, (a4) ; ZVBB-RV32-NEXT: vl1re32.v v11, (a5) ; ZVBB-RV32-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a6, a2 ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a6) ; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re32.v v8, (a6) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv24i32_nxv4i32: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v14 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: vmv2r.v v24, v10 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: li a0, 6 ; ZVBB-RV64-NEXT: mul a1, a1, a0 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv1r.v v10, v25 ; ZVBB-RV64-NEXT: vmv1r.v v11, v23 ; ZVBB-RV64-NEXT: vmv1r.v v12, v21 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv1r.v v13, v17 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv1r.v v14, v19 ; ZVBB-RV64-NEXT: vsseg6e32.v v9, (a1) ; ZVBB-RV64-NEXT: vmv1r.v v9, v24 ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vmv1r.v v10, v22 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v11, v20 ; ZVBB-RV64-NEXT: add a4, a3, a2 ; ZVBB-RV64-NEXT: vmv1r.v v12, v16 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v13, v18 ; ZVBB-RV64-NEXT: vsseg6e32.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re32.v v14, (a1) ; ZVBB-RV64-NEXT: add a1, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v15, (a5) ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vl1re32.v v18, (a5) ; ZVBB-RV64-NEXT: add a5, a5, a2 ; ZVBB-RV64-NEXT: vl1re32.v v19, (a5) ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vl1re32.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV64-NEXT: csrr a6, vlenb ; ZVBB-RV64-NEXT: li 
a7, 12 ; ZVBB-RV64-NEXT: mul a6, a6, a7 ; ZVBB-RV64-NEXT: add a6, sp, a6 ; ZVBB-RV64-NEXT: addi a6, a6, 64 ; ZVBB-RV64-NEXT: vl1re32.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re32.v v10, (a4) ; ZVBB-RV64-NEXT: vl1re32.v v11, (a5) ; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a6, a2 ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a6) ; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re32.v v8, (a6) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv24i32_nxv4i32: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v14 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: vmv2r.v v24, v10 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: li a0, 6 ; ZIP-NEXT: mul a1, a1, a0 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv1r.v v10, v25 ; ZIP-NEXT: vmv1r.v v11, v23 ; ZIP-NEXT: vmv1r.v v12, v21 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv1r.v v13, v17 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv1r.v v14, v19 ; ZIP-NEXT: vsseg6e32.v v9, (a1) ; ZIP-NEXT: vmv1r.v v9, v24 ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vmv1r.v v10, v22 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v11, v20 ; ZIP-NEXT: add a4, a3, a2 ; ZIP-NEXT: vmv1r.v v12, v16 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v13, v18 ; ZIP-NEXT: vsseg6e32.v v8, (a0) ; ZIP-NEXT: vl1re32.v v14, (a1) ; ZIP-NEXT: add a1, a6, a2 ; ZIP-NEXT: vl1re32.v v15, (a5) ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vl1re32.v v18, (a5) ; ZIP-NEXT: add a5, a5, a2 ; ZIP-NEXT: vl1re32.v v19, (a5) ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vl1re32.v v16, (a6) ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vl1re32.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v13, (a6) ; ZIP-NEXT: csrr a6, vlenb ; ZIP-NEXT: li a7, 12 ; ZIP-NEXT: mul a6, a6, a7 ; ZIP-NEXT: add a6, sp, a6 ; ZIP-NEXT: addi a6, a6, 64 ; ZIP-NEXT: vl1re32.v v17, (a1) ; ZIP-NEXT: vl1re32.v v10, (a4) ; ZIP-NEXT: vl1re32.v v11, (a5) ; ZIP-NEXT: vl1re32.v v8, (a0) ; ZIP-NEXT: vl1re32.v v9, (a3) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a6, a2 ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a6) ; ZIP-NEXT: vl8re32.v v16, (a2) ; ZIP-NEXT: vl8re32.v v8, (a6) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave6.nxv24i32( %a, %b, %c, %d, %e, %f) ret %res } define @vector_interleave_nxv12i64_nxv2i64( %a, %b, %c, %d, %e, %f) nounwind { ; ; RV32-LABEL: vector_interleave_nxv12i64_nxv2i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v14 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: vmv2r.v v24, v10 ; RV32-NEXT: csrr a1, vlenb ;
RV32-NEXT: li a0, 6 ; RV32-NEXT: mul a1, a1, a0 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv1r.v v10, v25 ; RV32-NEXT: vmv1r.v v11, v23 ; RV32-NEXT: vmv1r.v v12, v21 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv1r.v v13, v17 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv1r.v v14, v19 ; RV32-NEXT: vsseg6e64.v v9, (a1) ; RV32-NEXT: vmv1r.v v9, v24 ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vmv1r.v v10, v22 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v11, v20 ; RV32-NEXT: add a4, a3, a2 ; RV32-NEXT: vmv1r.v v12, v16 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v13, v18 ; RV32-NEXT: vsseg6e64.v v8, (a0) ; RV32-NEXT: vl1re64.v v14, (a1) ; RV32-NEXT: add a1, a6, a2 ; RV32-NEXT: vl1re64.v v15, (a5) ; RV32-NEXT: add a5, a1, a2 ; RV32-NEXT: vl1re64.v v18, (a5) ; RV32-NEXT: add a5, a5, a2 ; RV32-NEXT: vl1re64.v v19, (a5) ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vl1re64.v v16, (a6) ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vl1re64.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v13, (a6) ; RV32-NEXT: csrr a6, vlenb ; RV32-NEXT: li a7, 12 ; RV32-NEXT: mul a6, a6, a7 ; RV32-NEXT: add a6, sp, a6 ; RV32-NEXT: addi a6, a6, 64 ; RV32-NEXT: vl1re64.v v17, (a1) ; RV32-NEXT: vl1re64.v v10, (a4) ; RV32-NEXT: vl1re64.v v11, (a5) ; RV32-NEXT: vl1re64.v v8, (a0) ; RV32-NEXT: vl1re64.v v9, (a3) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a6, a2 ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a6) ; RV32-NEXT: vl8re64.v v16, (a2) ; RV32-NEXT: vl8re64.v v8, (a6) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv12i64_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v14 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: vmv2r.v v24, v10 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: li a0, 6 ; RV64-NEXT: mul a1, a1, a0 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv1r.v v10, v25 ; RV64-NEXT: vmv1r.v v11, v23 ; RV64-NEXT: vmv1r.v v12, v21 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv1r.v v13, v17 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv1r.v v14, v19 ; RV64-NEXT: vsseg6e64.v v9, (a1) ; RV64-NEXT: vmv1r.v v9, v24 ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vmv1r.v v10, v22 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v11, v20 ; RV64-NEXT: add a4, a3, a2 ; RV64-NEXT: vmv1r.v v12, v16 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v13, v18 ; RV64-NEXT: vsseg6e64.v v8, (a0) ; RV64-NEXT: vl1re64.v v14, (a1) ; RV64-NEXT: add a1, a6, a2 ; RV64-NEXT: vl1re64.v v15, (a5) ; RV64-NEXT: add a5, a1, a2 ; RV64-NEXT: vl1re64.v v18, (a5) ; RV64-NEXT: add a5, a5, a2 ; RV64-NEXT: vl1re64.v v19, (a5) ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vl1re64.v v16, (a6) ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vl1re64.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v13, (a6) ; RV64-NEXT: csrr a6, vlenb ; RV64-NEXT: li a7, 12 ; RV64-NEXT: mul a6, a6, a7 ; RV64-NEXT: add a6, sp, a6 ; RV64-NEXT: addi a6, a6, 64 ; RV64-NEXT: vl1re64.v v17, (a1) ; RV64-NEXT: vl1re64.v v10, (a4) ; RV64-NEXT: vl1re64.v v11, (a5) ; RV64-NEXT: vl1re64.v v8, (a0) ; RV64-NEXT: vl1re64.v 
v9, (a3) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a6, a2 ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a6) ; RV64-NEXT: vl8re64.v v16, (a2) ; RV64-NEXT: vl8re64.v v8, (a6) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv12i64_nxv2i64: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v14 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: vmv2r.v v24, v10 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: li a0, 6 ; ZVBB-RV32-NEXT: mul a1, a1, a0 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv1r.v v10, v25 ; ZVBB-RV32-NEXT: vmv1r.v v11, v23 ; ZVBB-RV32-NEXT: vmv1r.v v12, v21 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv1r.v v13, v17 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv1r.v v14, v19 ; ZVBB-RV32-NEXT: vsseg6e64.v v9, (a1) ; ZVBB-RV32-NEXT: vmv1r.v v9, v24 ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vmv1r.v v10, v22 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v11, v20 ; ZVBB-RV32-NEXT: add a4, a3, a2 ; ZVBB-RV32-NEXT: vmv1r.v v12, v16 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v13, v18 ; ZVBB-RV32-NEXT: vsseg6e64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v14, (a1) ; ZVBB-RV32-NEXT: add a1, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV32-NEXT: add a5, a1, a2 ; ZVBB-RV32-NEXT: vl1re64.v v18, (a5) ; ZVBB-RV32-NEXT: add a5, a5, a2 ; ZVBB-RV32-NEXT: vl1re64.v v19, (a5) ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV32-NEXT: csrr a6, vlenb ; ZVBB-RV32-NEXT: li a7, 12 ; ZVBB-RV32-NEXT: mul a6, a6, a7 ; ZVBB-RV32-NEXT: add a6, sp, a6 ; ZVBB-RV32-NEXT: addi a6, a6, 64 ; ZVBB-RV32-NEXT: vl1re64.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re64.v v10, (a4) ; ZVBB-RV32-NEXT: vl1re64.v v11, (a5) ; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a6, a2 ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a6) ; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re64.v v8, (a6) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv12i64_nxv2i64: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v14 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: vmv2r.v v24, v10 ; ZVBB-RV64-NEXT: csrr a1, 
vlenb ; ZVBB-RV64-NEXT: li a0, 6 ; ZVBB-RV64-NEXT: mul a1, a1, a0 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv1r.v v10, v25 ; ZVBB-RV64-NEXT: vmv1r.v v11, v23 ; ZVBB-RV64-NEXT: vmv1r.v v12, v21 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv1r.v v13, v17 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv1r.v v14, v19 ; ZVBB-RV64-NEXT: vsseg6e64.v v9, (a1) ; ZVBB-RV64-NEXT: vmv1r.v v9, v24 ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vmv1r.v v10, v22 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v11, v20 ; ZVBB-RV64-NEXT: add a4, a3, a2 ; ZVBB-RV64-NEXT: vmv1r.v v12, v16 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v13, v18 ; ZVBB-RV64-NEXT: vsseg6e64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v14, (a1) ; ZVBB-RV64-NEXT: add a1, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV64-NEXT: add a5, a1, a2 ; ZVBB-RV64-NEXT: vl1re64.v v18, (a5) ; ZVBB-RV64-NEXT: add a5, a5, a2 ; ZVBB-RV64-NEXT: vl1re64.v v19, (a5) ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV64-NEXT: csrr a6, vlenb ; ZVBB-RV64-NEXT: li a7, 12 ; ZVBB-RV64-NEXT: mul a6, a6, a7 ; ZVBB-RV64-NEXT: add a6, sp, a6 ; ZVBB-RV64-NEXT: addi a6, a6, 64 ; ZVBB-RV64-NEXT: vl1re64.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re64.v v10, (a4) ; ZVBB-RV64-NEXT: vl1re64.v v11, (a5) ; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a6, a2 ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a6) ; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re64.v v8, (a6) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv12i64_nxv2i64: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v14 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: vmv2r.v v24, v10 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: li a0, 6 ; ZIP-NEXT: mul a1, a1, a0 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv1r.v v10, v25 ; ZIP-NEXT: vmv1r.v v11, v23 ; ZIP-NEXT: vmv1r.v v12, v21 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv1r.v v13, v17 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv1r.v v14, v19 ; ZIP-NEXT: vsseg6e64.v v9, (a1) ; ZIP-NEXT: vmv1r.v v9, v24 ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vmv1r.v v10, v22 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v11, v20 ; ZIP-NEXT: add a4, a3, a2 ; ZIP-NEXT: vmv1r.v v12, v16 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v13, v18 ; ZIP-NEXT: vsseg6e64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v14, (a1) ; ZIP-NEXT: add a1, a6, a2 ; ZIP-NEXT: vl1re64.v v15, (a5) ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vl1re64.v v18, (a5) ; ZIP-NEXT: add a5, a5, a2 ; ZIP-NEXT: vl1re64.v v19, (a5) ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vl1re64.v v16, (a6) ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vl1re64.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v13, (a6) ; ZIP-NEXT: csrr a6, vlenb ; ZIP-NEXT: li a7, 12 ; 
ZIP-NEXT: mul a6, a6, a7 ; ZIP-NEXT: add a6, sp, a6 ; ZIP-NEXT: addi a6, a6, 64 ; ZIP-NEXT: vl1re64.v v17, (a1) ; ZIP-NEXT: vl1re64.v v10, (a4) ; ZIP-NEXT: vl1re64.v v11, (a5) ; ZIP-NEXT: vl1re64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v9, (a3) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a6, a2 ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a6) ; ZIP-NEXT: vl8re64.v v16, (a2) ; ZIP-NEXT: vl8re64.v v8, (a6) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave6.nxv12i64( %a, %b, %c, %d, %e, %f) ret %res } define @vector_interleave_nxv112i1_nxv16i1( %a, %b, %c, %d, %e, %f, %g) nounwind { ; CHECK-LABEL: vector_interleave_nxv112i1_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 14 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v14, 0 ; CHECK-NEXT: addi a3, sp, 16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add a0, sp, a0 ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vmerge.vim v16, v14, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v22, v14, 1, v0 ; CHECK-NEXT: add a2, a3, a1 ; CHECK-NEXT: vmv4r.v v24, v16 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmerge.vim v18, v14, 1, v0 ; CHECK-NEXT: add a4, a2, a1 ; CHECK-NEXT: vmv1r.v v25, v22 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmerge.vim v8, v14, 1, v0 ; CHECK-NEXT: vmv1r.v v26, v18 ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: vmerge.vim v20, v14, 1, v0 ; CHECK-NEXT: vmv1r.v v27, v8 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmerge.vim v10, v14, 1, v0 ; CHECK-NEXT: vmv1r.v v28, v20 ; CHECK-NEXT: vmv1r.v v18, v23 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: vmv1r.v v29, v10 ; CHECK-NEXT: vmv1r.v v20, v9 ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vim v30, v14, 1, v0 ; CHECK-NEXT: vmv1r.v v22, v11 ; CHECK-NEXT: vsetvli a6, zero, e8, m1, ta, ma ; CHECK-NEXT: vsseg7e8.v v24, (a3) ; CHECK-NEXT: vmv1r.v v23, v31 ; CHECK-NEXT: vsseg7e8.v v17, (a0) ; CHECK-NEXT: vl1r.v v8, (a4) ; CHECK-NEXT: add a4, a5, a1 ; CHECK-NEXT: vl1r.v v10, (a3) ; CHECK-NEXT: add a6, a4, a1 ; CHECK-NEXT: vl1r.v v12, (a4) ; CHECK-NEXT: add a3, a6, a1 ; CHECK-NEXT: vl1r.v v14, (a3) ; CHECK-NEXT: srli a3, a1, 1 ; CHECK-NEXT: vl1r.v v9, (a5) ; CHECK-NEXT: add a4, a0, a1 ; CHECK-NEXT: vl1r.v v16, (a4) ; CHECK-NEXT: add a4, a4, a1 ; CHECK-NEXT: vl1r.v v11, (a2) ; CHECK-NEXT: add a2, a4, a1 ; CHECK-NEXT: vl1r.v v18, (a2) ; CHECK-NEXT: add a2, a2, a1 ; CHECK-NEXT: vl1r.v v13, (a6) ; CHECK-NEXT: add a5, a2, a1 ; CHECK-NEXT: vl1r.v v20, (a5) ; CHECK-NEXT: add a5, a5, a1 ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vl1r.v v15, (a0) ; CHECK-NEXT: vl1r.v v19, (a2) ; CHECK-NEXT: vl1r.v v17, (a4) ; CHECK-NEXT: vl1r.v v21, (a5) ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vi v22, v8, 0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmsne.vi v9, v12, 0 ; CHECK-NEXT: vmsne.vi v10, v14, 0 ; CHECK-NEXT: vmsne.vi v11, v18, 0 ; CHECK-NEXT: vmsne.vi v8, v16, 0 ; CHECK-NEXT: vmsne.vi v12, v20, 0 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v0, v22, a1 ; CHECK-NEXT: vslideup.vx v9, v10, a1 ; CHECK-NEXT: vslideup.vx v8, v11, a1 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vx v0, v9, a3 ; CHECK-NEXT: vslideup.vx v8, v12, a3 ; CHECK-NEXT: 
csrr a0, vlenb ; CHECK-NEXT: li a1, 14 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv112i1_nxv16i1: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 14 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmv.v.i v14, 0 ; ZVBB-NEXT: addi a3, sp, 16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: add a0, sp, a0 ; ZVBB-NEXT: addi a0, a0, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vmerge.vim v16, v14, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v8 ; ZVBB-NEXT: vmerge.vim v22, v14, 1, v0 ; ZVBB-NEXT: add a2, a3, a1 ; ZVBB-NEXT: vmv4r.v v24, v16 ; ZVBB-NEXT: vmv1r.v v0, v9 ; ZVBB-NEXT: vmerge.vim v18, v14, 1, v0 ; ZVBB-NEXT: add a4, a2, a1 ; ZVBB-NEXT: vmv1r.v v25, v22 ; ZVBB-NEXT: vmv1r.v v0, v10 ; ZVBB-NEXT: vmerge.vim v8, v14, 1, v0 ; ZVBB-NEXT: vmv1r.v v26, v18 ; ZVBB-NEXT: vmv1r.v v0, v11 ; ZVBB-NEXT: vmerge.vim v20, v14, 1, v0 ; ZVBB-NEXT: vmv1r.v v27, v8 ; ZVBB-NEXT: vmv1r.v v0, v12 ; ZVBB-NEXT: vmerge.vim v10, v14, 1, v0 ; ZVBB-NEXT: vmv1r.v v28, v20 ; ZVBB-NEXT: vmv1r.v v18, v23 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: vmv1r.v v29, v10 ; ZVBB-NEXT: vmv1r.v v20, v9 ; ZVBB-NEXT: vmv1r.v v0, v13 ; ZVBB-NEXT: vmerge.vim v30, v14, 1, v0 ; ZVBB-NEXT: vmv1r.v v22, v11 ; ZVBB-NEXT: vsetvli a6, zero, e8, m1, ta, ma ; ZVBB-NEXT: vsseg7e8.v v24, (a3) ; ZVBB-NEXT: vmv1r.v v23, v31 ; ZVBB-NEXT: vsseg7e8.v v17, (a0) ; ZVBB-NEXT: vl1r.v v8, (a4) ; ZVBB-NEXT: add a4, a5, a1 ; ZVBB-NEXT: vl1r.v v10, (a3) ; ZVBB-NEXT: add a6, a4, a1 ; ZVBB-NEXT: vl1r.v v12, (a4) ; ZVBB-NEXT: add a3, a6, a1 ; ZVBB-NEXT: vl1r.v v14, (a3) ; ZVBB-NEXT: srli a3, a1, 1 ; ZVBB-NEXT: vl1r.v v9, (a5) ; ZVBB-NEXT: add a4, a0, a1 ; ZVBB-NEXT: vl1r.v v16, (a4) ; ZVBB-NEXT: add a4, a4, a1 ; ZVBB-NEXT: vl1r.v v11, (a2) ; ZVBB-NEXT: add a2, a4, a1 ; ZVBB-NEXT: vl1r.v v18, (a2) ; ZVBB-NEXT: add a2, a2, a1 ; ZVBB-NEXT: vl1r.v v13, (a6) ; ZVBB-NEXT: add a5, a2, a1 ; ZVBB-NEXT: vl1r.v v20, (a5) ; ZVBB-NEXT: add a5, a5, a1 ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vl1r.v v15, (a0) ; ZVBB-NEXT: vl1r.v v19, (a2) ; ZVBB-NEXT: vl1r.v v17, (a4) ; ZVBB-NEXT: vl1r.v v21, (a5) ; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmsne.vi v22, v8, 0 ; ZVBB-NEXT: vmsne.vi v0, v10, 0 ; ZVBB-NEXT: vmsne.vi v9, v12, 0 ; ZVBB-NEXT: vmsne.vi v10, v14, 0 ; ZVBB-NEXT: vmsne.vi v11, v18, 0 ; ZVBB-NEXT: vmsne.vi v8, v16, 0 ; ZVBB-NEXT: vmsne.vi v12, v20, 0 ; ZVBB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; ZVBB-NEXT: vslideup.vx v0, v22, a1 ; ZVBB-NEXT: vslideup.vx v9, v10, a1 ; ZVBB-NEXT: vslideup.vx v8, v11, a1 ; ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v0, v9, a3 ; ZVBB-NEXT: vslideup.vx v8, v12, a3 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 14 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave7.nxv112i1( %a, %b, %c, %d, %e, %f, %g) ret %res } define @vector_interleave_nxv112i8_nxv16i8( %a, %b, %c, %d, %e, %f, %g) nounwind { ; ; RV32-LABEL: vector_interleave_nxv112i8_nxv16i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e8, m1, 
ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e8.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e8.v v21, (a1) ; RV32-NEXT: vl1r.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1r.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v11, (a6) ; RV32-NEXT: vl1r.v v8, (a0) ; RV32-NEXT: vl1r.v v16, (a4) ; RV32-NEXT: vl1r.v v9, (a3) ; RV32-NEXT: vl1r.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1r.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1r.v v14, (a6) ; RV32-NEXT: vl1r.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8r.v v16, (a2) ; RV32-NEXT: vl8r.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv112i8_nxv16i8: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e8.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e8.v v21, (a1) ; RV64-NEXT: vl1r.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v21, (a6) ; RV64-NEXT: add a6, a3, 
a2 ; RV64-NEXT: vl1r.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v11, (a6) ; RV64-NEXT: vl1r.v v8, (a0) ; RV64-NEXT: vl1r.v v16, (a4) ; RV64-NEXT: vl1r.v v9, (a3) ; RV64-NEXT: vl1r.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1r.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1r.v v14, (a6) ; RV64-NEXT: vl1r.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8r.v v16, (a2) ; RV64-NEXT: vl8r.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv112i8_nxv16i8: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e8.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e8.v v21, (a1) ; ZVBB-RV32-NEXT: vl1r.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1r.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v11, (a6) ; ZVBB-RV32-NEXT: vl1r.v v8, (a0) ; ZVBB-RV32-NEXT: vl1r.v v16, (a4) ; ZVBB-RV32-NEXT: vl1r.v v9, (a3) ; ZVBB-RV32-NEXT: vl1r.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1r.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1r.v v14, (a6) ; ZVBB-RV32-NEXT: vl1r.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: 
vl8r.v v16, (a2) ; ZVBB-RV32-NEXT: vl8r.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv112i8_nxv16i8: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e8.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e8.v v21, (a1) ; ZVBB-RV64-NEXT: vl1r.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1r.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v11, (a6) ; ZVBB-RV64-NEXT: vl1r.v v8, (a0) ; ZVBB-RV64-NEXT: vl1r.v v16, (a4) ; ZVBB-RV64-NEXT: vl1r.v v9, (a3) ; ZVBB-RV64-NEXT: vl1r.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1r.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1r.v v14, (a6) ; ZVBB-RV64-NEXT: vl1r.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8r.v v16, (a2) ; ZVBB-RV64-NEXT: vl8r.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv112i8_nxv16i8: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v 
v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e8.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e8.v v21, (a1) ; ZIP-NEXT: vl1r.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1r.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v11, (a6) ; ZIP-NEXT: vl1r.v v8, (a0) ; ZIP-NEXT: vl1r.v v16, (a4) ; ZIP-NEXT: vl1r.v v9, (a3) ; ZIP-NEXT: vl1r.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1r.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1r.v v14, (a6) ; ZIP-NEXT: vl1r.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8r.v v16, (a2) ; ZIP-NEXT: vl8r.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 112 x i8> @llvm.vector.interleave7.nxv112i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g) ret <vscale x 112 x i8> %res } define <vscale x 56 x i16> @vector_interleave_nxv56i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) nounwind { ; ; RV32-LABEL: vector_interleave_nxv56i16_nxv8i16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e16.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e16.v v21, (a1) ; RV32-NEXT: vl1re16.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v21,
(a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v11, (a6) ; RV32-NEXT: vl1re16.v v8, (a0) ; RV32-NEXT: vl1re16.v v16, (a4) ; RV32-NEXT: vl1re16.v v9, (a3) ; RV32-NEXT: vl1re16.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re16.v v14, (a6) ; RV32-NEXT: vl1re16.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re16.v v16, (a2) ; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv56i16_nxv8i16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e16.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e16.v v21, (a1) ; RV64-NEXT: vl1re16.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v11, (a6) ; RV64-NEXT: vl1re16.v v8, (a0) ; RV64-NEXT: vl1re16.v v16, (a4) ; RV64-NEXT: vl1re16.v v9, (a3) ; RV64-NEXT: vl1re16.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re16.v v14, (a6) ; RV64-NEXT: vl1re16.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re16.v v16, (a2) ; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: 
vector_interleave_nxv56i16_nxv8i16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv56i16_nxv8i16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, 
a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv56i16_nxv8i16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e16.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e16.v v21, (a1) ; ZIP-NEXT: vl1re16.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v19, (a6) ; ZIP-NEXT: add 
a6, a6, a2 ; ZIP-NEXT: vl1re16.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v11, (a6) ; ZIP-NEXT: vl1re16.v v8, (a0) ; ZIP-NEXT: vl1re16.v v16, (a4) ; ZIP-NEXT: vl1re16.v v9, (a3) ; ZIP-NEXT: vl1re16.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re16.v v14, (a6) ; ZIP-NEXT: vl1re16.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re16.v v16, (a2) ; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 56 x i16> @llvm.vector.interleave7.nxv56i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d, <vscale x 8 x i16> %e, <vscale x 8 x i16> %f, <vscale x 8 x i16> %g) ret <vscale x 56 x i16> %res } define <vscale x 28 x i32> @vector_interleave_nxv28i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) nounwind { ; ; RV32-LABEL: vector_interleave_nxv28i32_nxv4i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e32.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e32.v v21, (a1) ; RV32-NEXT: vl1re32.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re32.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v11, (a6) ; RV32-NEXT: vl1re32.v v8, (a0) ; RV32-NEXT: vl1re32.v v16, (a4) ; RV32-NEXT: vl1re32.v v9, (a3) ; RV32-NEXT: vl1re32.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re32.v v14, (a6) ; RV32-NEXT: vl1re32.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re32.v v16, (a2) ;
RV32-NEXT: vl8re32.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv28i32_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e32.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e32.v v21, (a1) ; RV64-NEXT: vl1re32.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re32.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v11, (a6) ; RV64-NEXT: vl1re32.v v8, (a0) ; RV64-NEXT: vl1re32.v v16, (a4) ; RV64-NEXT: vl1re32.v v9, (a3) ; RV64-NEXT: vl1re32.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re32.v v14, (a6) ; RV64-NEXT: vl1re32.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re32.v v16, (a2) ; RV64-NEXT: vl8re32.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv28i32_nxv4i32: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; 
ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e32.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e32.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re32.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re32.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re32.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re32.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv28i32_nxv4i32: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e32.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e32.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re32.v v18, 
(a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re32.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re32.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re32.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv28i32_nxv4i32: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e32.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e32.v v21, (a1) ; ZIP-NEXT: vl1re32.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re32.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v11, (a6) ; ZIP-NEXT: vl1re32.v v8, (a0) ; ZIP-NEXT: vl1re32.v v16, (a4) ; ZIP-NEXT: vl1re32.v v9, (a3) ; ZIP-NEXT: vl1re32.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re32.v v14, (a6) ; ZIP-NEXT: vl1re32.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) 
; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re32.v v16, (a2) ; ZIP-NEXT: vl8re32.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32> %e, <vscale x 4 x i32> %f, <vscale x 4 x i32> %g) ret <vscale x 28 x i32> %res } define <vscale x 14 x i64> @vector_interleave_nxv14i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) nounwind { ; ; RV32-LABEL: vector_interleave_nxv14i64_nxv2i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e64.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e64.v v21, (a1) ; RV32-NEXT: vl1re64.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re64.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v11, (a6) ; RV32-NEXT: vl1re64.v v8, (a0) ; RV32-NEXT: vl1re64.v v16, (a4) ; RV32-NEXT: vl1re64.v v9, (a3) ; RV32-NEXT: vl1re64.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re64.v v14, (a6) ; RV32-NEXT: vl1re64.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re64.v v16, (a2) ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv14i64_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ;
RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e64.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e64.v v21, (a1) ; RV64-NEXT: vl1re64.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re64.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v11, (a6) ; RV64-NEXT: vl1re64.v v8, (a0) ; RV64-NEXT: vl1re64.v v16, (a4) ; RV64-NEXT: vl1re64.v v9, (a3) ; RV64-NEXT: vl1re64.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re64.v v14, (a6) ; RV64-NEXT: vl1re64.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re64.v v16, (a2) ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv14i64_nxv2i64: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e64.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e64.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re64.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, 
a2 ; ZVBB-RV32-NEXT: vl1re64.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re64.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re64.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv14i64_nxv2i64: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e64.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e64.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re64.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re64.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV64-NEXT: 
add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re64.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv14i64_nxv2i64: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e64.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e64.v v21, (a1) ; ZIP-NEXT: vl1re64.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re64.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v11, (a6) ; ZIP-NEXT: vl1re64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v16, (a4) ; ZIP-NEXT: vl1re64.v v9, (a3) ; ZIP-NEXT: vl1re64.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re64.v v14, (a6) ; ZIP-NEXT: vl1re64.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re64.v v16, (a2) ; ZIP-NEXT: vl8re64.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 14 x i64> @llvm.vector.interleave7.nxv14i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d, <vscale x 2 x i64> %e, <vscale x 2 x i64> %f, <vscale x 2 x i64> %g) ret <vscale x 14 x i64> %res } define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g, <vscale x 16 x i1> %h) nounwind { ; CHECK-LABEL: vector_interleave_nxv128i1_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v22, 0 ; CHECK-NEXT: vmerge.vim v24, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v16, v22, 1, v0 ; CHECK-NEXT: vmv1r.v
v1, v24 ; CHECK-NEXT: vmv1r.v v2, v16 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmerge.vim v26, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vmerge.vim v18, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v3, v26 ; CHECK-NEXT: vmv1r.v v4, v18 ; CHECK-NEXT: vmv1r.v v0, v11 ; CHECK-NEXT: vmerge.vim v8, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vmerge.vim v20, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v5, v8 ; CHECK-NEXT: vmv1r.v v6, v20 ; CHECK-NEXT: vmv1r.v v0, v13 ; CHECK-NEXT: vmerge.vim v10, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v0, v14 ; CHECK-NEXT: vmerge.vim v22, v22, 1, v0 ; CHECK-NEXT: vmv1r.v v7, v10 ; CHECK-NEXT: vmv1r.v v8, v22 ; CHECK-NEXT: vmv1r.v v16, v25 ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v1, (a2) ; CHECK-NEXT: vmv1r.v v18, v27 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: add a3, a2, a0 ; CHECK-NEXT: add a4, a1, a0 ; CHECK-NEXT: add a5, a3, a0 ; CHECK-NEXT: add a6, a4, a0 ; CHECK-NEXT: add a7, a5, a0 ; CHECK-NEXT: add t0, a6, a0 ; CHECK-NEXT: vmv1r.v v20, v9 ; CHECK-NEXT: add t1, a7, a0 ; CHECK-NEXT: vmv1r.v v22, v11 ; CHECK-NEXT: vsseg8e8.v v16, (a1) ; CHECK-NEXT: vl1r.v v8, (a5) ; CHECK-NEXT: add a5, t0, a0 ; CHECK-NEXT: vl1r.v v12, (t1) ; CHECK-NEXT: add t1, t1, a0 ; CHECK-NEXT: vl1r.v v14, (a2) ; CHECK-NEXT: add a2, a5, a0 ; CHECK-NEXT: vl1r.v v10, (a5) ; CHECK-NEXT: add a5, t1, a0 ; CHECK-NEXT: vl1r.v v16, (a5) ; CHECK-NEXT: add a5, a5, a0 ; CHECK-NEXT: vl1r.v v17, (a5) ; CHECK-NEXT: add a5, a2, a0 ; CHECK-NEXT: vl1r.v v18, (a5) ; CHECK-NEXT: add a5, a5, a0 ; CHECK-NEXT: vl1r.v v13, (t1) ; CHECK-NEXT: vl1r.v v9, (a7) ; CHECK-NEXT: vl1r.v v15, (a3) ; CHECK-NEXT: vsetvli a3, zero, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vi v20, v16, 0 ; CHECK-NEXT: vmsne.vi v16, v12, 0 ; CHECK-NEXT: vl1r.v v12, (a6) ; CHECK-NEXT: vmsne.vi v17, v8, 0 ; CHECK-NEXT: vmsne.vi v0, v14, 0 ; CHECK-NEXT: vl1r.v v14, (a1) ; CHECK-NEXT: vl1r.v v19, (a5) ; CHECK-NEXT: vl1r.v v11, (a2) ; CHECK-NEXT: vl1r.v v13, (t0) ; CHECK-NEXT: vl1r.v v15, (a4) ; CHECK-NEXT: vmsne.vi v9, v18, 0 ; CHECK-NEXT: vmsne.vi v18, v10, 0 ; CHECK-NEXT: vmsne.vi v10, v12, 0 ; CHECK-NEXT: vmsne.vi v8, v14, 0 ; CHECK-NEXT: srli a1, a0, 2 ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v16, v20, a1 ; CHECK-NEXT: vslideup.vx v0, v17, a1 ; CHECK-NEXT: vslideup.vx v18, v9, a1 ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vx v0, v16, a0 ; CHECK-NEXT: vslideup.vx v8, v18, a0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv128i1_nxv16i1: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmv.v.i v22, 0 ; ZVBB-NEXT: vmerge.vim v24, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v8 ; ZVBB-NEXT: vmerge.vim v16, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v1, v24 ; ZVBB-NEXT: vmv1r.v v2, v16 ; ZVBB-NEXT: vmv1r.v v0, v9 ; ZVBB-NEXT: vmerge.vim v26, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v10 ; ZVBB-NEXT: vmerge.vim v18, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v3, v26 ; ZVBB-NEXT: vmv1r.v v4, v18 ; ZVBB-NEXT: vmv1r.v v0, v11 ; ZVBB-NEXT: vmerge.vim v8, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v12 ; ZVBB-NEXT: vmerge.vim v20, v22, 1, v0 ; 
ZVBB-NEXT: vmv1r.v v5, v8 ; ZVBB-NEXT: vmv1r.v v6, v20 ; ZVBB-NEXT: vmv1r.v v0, v13 ; ZVBB-NEXT: vmerge.vim v10, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v0, v14 ; ZVBB-NEXT: vmerge.vim v22, v22, 1, v0 ; ZVBB-NEXT: vmv1r.v v7, v10 ; ZVBB-NEXT: vmv1r.v v8, v22 ; ZVBB-NEXT: vmv1r.v v16, v25 ; ZVBB-NEXT: addi a2, sp, 16 ; ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-NEXT: vsseg8e8.v v1, (a2) ; ZVBB-NEXT: vmv1r.v v18, v27 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: add a3, a2, a0 ; ZVBB-NEXT: add a4, a1, a0 ; ZVBB-NEXT: add a5, a3, a0 ; ZVBB-NEXT: add a6, a4, a0 ; ZVBB-NEXT: add a7, a5, a0 ; ZVBB-NEXT: add t0, a6, a0 ; ZVBB-NEXT: vmv1r.v v20, v9 ; ZVBB-NEXT: add t1, a7, a0 ; ZVBB-NEXT: vmv1r.v v22, v11 ; ZVBB-NEXT: vsseg8e8.v v16, (a1) ; ZVBB-NEXT: vl1r.v v8, (a5) ; ZVBB-NEXT: add a5, t0, a0 ; ZVBB-NEXT: vl1r.v v12, (t1) ; ZVBB-NEXT: add t1, t1, a0 ; ZVBB-NEXT: vl1r.v v14, (a2) ; ZVBB-NEXT: add a2, a5, a0 ; ZVBB-NEXT: vl1r.v v10, (a5) ; ZVBB-NEXT: add a5, t1, a0 ; ZVBB-NEXT: vl1r.v v16, (a5) ; ZVBB-NEXT: add a5, a5, a0 ; ZVBB-NEXT: vl1r.v v17, (a5) ; ZVBB-NEXT: add a5, a2, a0 ; ZVBB-NEXT: vl1r.v v18, (a5) ; ZVBB-NEXT: add a5, a5, a0 ; ZVBB-NEXT: vl1r.v v13, (t1) ; ZVBB-NEXT: vl1r.v v9, (a7) ; ZVBB-NEXT: vl1r.v v15, (a3) ; ZVBB-NEXT: vsetvli a3, zero, e8, m2, ta, ma ; ZVBB-NEXT: vmsne.vi v20, v16, 0 ; ZVBB-NEXT: vmsne.vi v16, v12, 0 ; ZVBB-NEXT: vl1r.v v12, (a6) ; ZVBB-NEXT: vmsne.vi v17, v8, 0 ; ZVBB-NEXT: vmsne.vi v0, v14, 0 ; ZVBB-NEXT: vl1r.v v14, (a1) ; ZVBB-NEXT: vl1r.v v19, (a5) ; ZVBB-NEXT: vl1r.v v11, (a2) ; ZVBB-NEXT: vl1r.v v13, (t0) ; ZVBB-NEXT: vl1r.v v15, (a4) ; ZVBB-NEXT: vmsne.vi v9, v18, 0 ; ZVBB-NEXT: vmsne.vi v18, v10, 0 ; ZVBB-NEXT: vmsne.vi v10, v12, 0 ; ZVBB-NEXT: vmsne.vi v8, v14, 0 ; ZVBB-NEXT: srli a1, a0, 2 ; ZVBB-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; ZVBB-NEXT: vslideup.vx v16, v20, a1 ; ZVBB-NEXT: vslideup.vx v0, v17, a1 ; ZVBB-NEXT: vslideup.vx v18, v9, a1 ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: srli a0, a0, 1 ; ZVBB-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v0, v16, a0 ; ZVBB-NEXT: vslideup.vx v8, v18, a0 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 128 x i1> @llvm.vector.interleave8.nxv128i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d, <vscale x 16 x i1> %e, <vscale x 16 x i1> %f, <vscale x 16 x i1> %g, <vscale x 16 x i1> %h) ret <vscale x 128 x i1> %res } define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d, <vscale x 16 x i8> %e, <vscale x 16 x i8> %f, <vscale x 16 x i8> %g, <vscale x 16 x i8> %h) nounwind { ; ; CHECK-LABEL: vector_interleave_nxv128i8_nxv16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ;
CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e8.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e8.v v22, (a1) ; CHECK-NEXT: vl1r.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1r.v v22, (t6) ; CHECK-NEXT: vl1r.v v15, (t5) ; CHECK-NEXT: vl1r.v v23, (a3) ; CHECK-NEXT: vl1r.v v12, (t1) ; CHECK-NEXT: vl1r.v v20, (t2) ; CHECK-NEXT: vl1r.v v13, (t3) ; CHECK-NEXT: vl1r.v v21, (t4) ; CHECK-NEXT: vl1r.v v10, (a5) ; CHECK-NEXT: vl1r.v v18, (a6) ; CHECK-NEXT: vl1r.v v11, (a7) ; CHECK-NEXT: vl1r.v v19, (t0) ; CHECK-NEXT: vl1r.v v8, (a0) ; CHECK-NEXT: vl1r.v v16, (a1) ; CHECK-NEXT: vl1r.v v9, (a2) ; CHECK-NEXT: vl1r.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv128i8_nxv16i8: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e8.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e8.v v22, (a1) ; ZVBB-NEXT: vl1r.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1r.v v22, (t6) ; ZVBB-NEXT: vl1r.v v15, (t5) ; ZVBB-NEXT: vl1r.v v23, (a3) ; ZVBB-NEXT: vl1r.v v12, (t1) ; ZVBB-NEXT: vl1r.v v20, (t2) ; ZVBB-NEXT: vl1r.v v13, (t3) ; ZVBB-NEXT: vl1r.v v21, (t4) ; ZVBB-NEXT: vl1r.v v10, (a5) ; ZVBB-NEXT: vl1r.v v18, (a6) ; ZVBB-NEXT: vl1r.v v11, (a7) ; ZVBB-NEXT: vl1r.v v19, (t0) ; ZVBB-NEXT: vl1r.v v8, (a0) ; ZVBB-NEXT: vl1r.v v16, (a1) ; ZVBB-NEXT: vl1r.v v9, (a2) ; ZVBB-NEXT: vl1r.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv128i8( %a, %b, %c, %d, %e, %f, %g, %h) ret %res } define @vector_interleave_nxv64i16_nxv8i16( %a, %b, %c, %d, %e, %f, %g, %h) nounwind { ; ; CHECK-LABEL: vector_interleave_nxv64i16_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; 
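; Note: the frame is sized by slli a0, a0, 4 above, i.e. 16*vlenb bytes, which
; is exactly the sixteen LMUL-1 spill slots the two vsseg8e8.v stores fill;
; the second segment store uses the 8*vlenb offset (slli a1, a1, 3).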
CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e16.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e16.v v22, (a1) ; CHECK-NEXT: vl1re16.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re16.v v22, (t6) ; CHECK-NEXT: vl1re16.v v15, (t5) ; CHECK-NEXT: vl1re16.v v23, (a3) ; CHECK-NEXT: vl1re16.v v12, (t1) ; CHECK-NEXT: vl1re16.v v20, (t2) ; CHECK-NEXT: vl1re16.v v13, (t3) ; CHECK-NEXT: vl1re16.v v21, (t4) ; CHECK-NEXT: vl1re16.v v10, (a5) ; CHECK-NEXT: vl1re16.v v18, (a6) ; CHECK-NEXT: vl1re16.v v11, (a7) ; CHECK-NEXT: vl1re16.v v19, (t0) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v16, (a1) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: vl1re16.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv64i16_nxv8i16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e16.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e16.v v22, (a1) ; ZVBB-NEXT: vl1re16.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re16.v v22, (t6) ; ZVBB-NEXT: vl1re16.v v15, (t5) ; ZVBB-NEXT: vl1re16.v v23, (a3) ; ZVBB-NEXT: vl1re16.v v12, (t1) ; ZVBB-NEXT: vl1re16.v v20, (t2) ; ZVBB-NEXT: vl1re16.v v13, (t3) ; ZVBB-NEXT: vl1re16.v v21, (t4) ; ZVBB-NEXT: vl1re16.v v10, (a5) ; ZVBB-NEXT: vl1re16.v v18, (a6) ; ZVBB-NEXT: vl1re16.v v11, (a7) ; ZVBB-NEXT: vl1re16.v v19, (t0) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v16, (a1) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: vl1re16.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; 
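; The same spill-and-reload scheme repeats below for e32 and e64; only the
; element width of the segment store (vsseg8eEW.v) and of the whole-register
; reloads (vl1reEW.v) changes.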
ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv64i16( %a, %b, %c, %d, %e, %f, %g, %h) ret %res } define @vector_interleave_nxv32i32_nxv4i32( %a, %b, %c, %d, %e, %f, %g, %h) nounwind { ; ; CHECK-LABEL: vector_interleave_nxv32i32_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e32.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e32.v v22, (a1) ; CHECK-NEXT: vl1re32.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re32.v v22, (t6) ; CHECK-NEXT: vl1re32.v v15, (t5) ; CHECK-NEXT: vl1re32.v v23, (a3) ; CHECK-NEXT: vl1re32.v v12, (t1) ; CHECK-NEXT: vl1re32.v v20, (t2) ; CHECK-NEXT: vl1re32.v v13, (t3) ; CHECK-NEXT: vl1re32.v v21, (t4) ; CHECK-NEXT: vl1re32.v v10, (a5) ; CHECK-NEXT: vl1re32.v v18, (a6) ; CHECK-NEXT: vl1re32.v v11, (a7) ; CHECK-NEXT: vl1re32.v v19, (t0) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v16, (a1) ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: vl1re32.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32i32_nxv4i32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e32.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e32.v v22, (a1) ; ZVBB-NEXT: vl1re32.v v14, (t5) ; 
ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re32.v v22, (t6) ; ZVBB-NEXT: vl1re32.v v15, (t5) ; ZVBB-NEXT: vl1re32.v v23, (a3) ; ZVBB-NEXT: vl1re32.v v12, (t1) ; ZVBB-NEXT: vl1re32.v v20, (t2) ; ZVBB-NEXT: vl1re32.v v13, (t3) ; ZVBB-NEXT: vl1re32.v v21, (t4) ; ZVBB-NEXT: vl1re32.v v10, (a5) ; ZVBB-NEXT: vl1re32.v v18, (a6) ; ZVBB-NEXT: vl1re32.v v11, (a7) ; ZVBB-NEXT: vl1re32.v v19, (t0) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v16, (a1) ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: vl1re32.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv32i32( %a, %b, %c, %d, %e, %f, %g, %h) ret %res } define @vector_interleave_nxv16i64_nxv2i64( %a, %b, %c, %d, %e, %f, %g, %h) nounwind { ; ; CHECK-LABEL: vector_interleave_nxv16i64_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e64.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e64.v v22, (a1) ; CHECK-NEXT: vl1re64.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re64.v v22, (t6) ; CHECK-NEXT: vl1re64.v v15, (t5) ; CHECK-NEXT: vl1re64.v v23, (a3) ; CHECK-NEXT: vl1re64.v v12, (t1) ; CHECK-NEXT: vl1re64.v v20, (t2) ; CHECK-NEXT: vl1re64.v v13, (t3) ; CHECK-NEXT: vl1re64.v v21, (t4) ; CHECK-NEXT: vl1re64.v v10, (a5) ; CHECK-NEXT: vl1re64.v v18, (a6) ; CHECK-NEXT: vl1re64.v v11, (a7) ; CHECK-NEXT: vl1re64.v v19, (t0) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v16, (a1) ; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: vl1re64.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16i64_nxv2i64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: 
vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e64.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e64.v v22, (a1) ; ZVBB-NEXT: vl1re64.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re64.v v22, (t6) ; ZVBB-NEXT: vl1re64.v v15, (t5) ; ZVBB-NEXT: vl1re64.v v23, (a3) ; ZVBB-NEXT: vl1re64.v v12, (t1) ; ZVBB-NEXT: vl1re64.v v20, (t2) ; ZVBB-NEXT: vl1re64.v v13, (t3) ; ZVBB-NEXT: vl1re64.v v21, (t4) ; ZVBB-NEXT: vl1re64.v v10, (a5) ; ZVBB-NEXT: vl1re64.v v18, (a6) ; ZVBB-NEXT: vl1re64.v v11, (a7) ; ZVBB-NEXT: vl1re64.v v19, (t0) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v16, (a1) ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: vl1re64.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv16i64( %a, %b, %c, %d, %e, %f, %g, %h) ret %res } ; Floats define @vector_interleave_nxv4bf16_nxv2bf16( %a, %b) { ; V-LABEL: vector_interleave_nxv4bf16_nxv2bf16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; V-NEXT: vwaddu.vv v10, v8, v9 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v10, a0, v9 ; V-NEXT: csrr a0, vlenb ; V-NEXT: srli a0, a0, 2 ; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; V-NEXT: vslidedown.vx v8, v10, a0 ; V-NEXT: vslideup.vx v10, v8, a0 ; V-NEXT: vmv.v.v v8, v10 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vwsll.vi v10, v9, 16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: vwaddu.wv v10, v10, v8 ; ZVBB-NEXT: srli a0, a0, 2 ; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslidedown.vx v8, v10, a0 ; ZVBB-NEXT: vslideup.vx v10, v8, a0 ; ZVBB-NEXT: vmv.v.v v8, v10 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv4bf16_nxv2bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 ; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: srli a0, a0, 2 ; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; ZIP-NEXT: vslideup.vx v10, v11, a0 ; ZIP-NEXT: vmv.v.v v8, v10 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv4bf16( %a, %b) ret %res } define @vector_interleave_nxv8bf16_nxv4bf16( %a, %b) { ; V-LABEL: vector_interleave_nxv8bf16_nxv4bf16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; V-NEXT: vmv1r.v v10, v9 ; V-NEXT: vmv1r.v v11, v8 ; V-NEXT: vwaddu.vv v8, v11, v10 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v10 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vmv1r.v v10, v9 ; ZVBB-NEXT: vmv1r.v v11, v8 ; ZVBB-NEXT: vwsll.vi v8, v10, 16 ; ZVBB-NEXT: vwaddu.wv v8, v8, v11 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv8bf16_nxv4bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv1r.v v10, v9 ; ZIP-NEXT: vmv1r.v v11, v8 ; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 ; ZIP-NEXT: ri.vzip2a.vv v8, 
v11, v10 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv8bf16( %a, %b) ret %res } define @vector_interleave_nxv4f16_nxv2f16( %a, %b) { ; V-LABEL: vector_interleave_nxv4f16_nxv2f16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; V-NEXT: vwaddu.vv v10, v8, v9 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v10, a0, v9 ; V-NEXT: csrr a0, vlenb ; V-NEXT: srli a0, a0, 2 ; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; V-NEXT: vslidedown.vx v8, v10, a0 ; V-NEXT: vslideup.vx v10, v8, a0 ; V-NEXT: vmv.v.v v8, v10 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vwsll.vi v10, v9, 16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: vwaddu.wv v10, v10, v8 ; ZVBB-NEXT: srli a0, a0, 2 ; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslidedown.vx v8, v10, a0 ; ZVBB-NEXT: vslideup.vx v10, v8, a0 ; ZVBB-NEXT: vmv.v.v v8, v10 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv4f16_nxv2f16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v11, v8, v9 ; ZIP-NEXT: ri.vzip2a.vv v10, v8, v9 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: srli a0, a0, 2 ; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; ZIP-NEXT: vslideup.vx v10, v11, a0 ; ZIP-NEXT: vmv.v.v v8, v10 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv4f16( %a, %b) ret %res } define @vector_interleave_nxv8f16_nxv4f16( %a, %b) { ; V-LABEL: vector_interleave_nxv8f16_nxv4f16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; V-NEXT: vmv1r.v v10, v9 ; V-NEXT: vmv1r.v v11, v8 ; V-NEXT: vwaddu.vv v8, v11, v10 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v10 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vmv1r.v v10, v9 ; ZVBB-NEXT: vmv1r.v v11, v8 ; ZVBB-NEXT: vwsll.vi v8, v10, 16 ; ZVBB-NEXT: vwaddu.wv v8, v8, v11 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv8f16_nxv4f16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv1r.v v10, v9 ; ZIP-NEXT: vmv1r.v v11, v8 ; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 ; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv8f16( %a, %b) ret %res } define @vector_interleave_nxv4f32_nxv2f32( %a, %b) { ; V-LABEL: vector_interleave_nxv4f32_nxv2f32: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; V-NEXT: vmv1r.v v10, v9 ; V-NEXT: vmv1r.v v11, v8 ; V-NEXT: vwaddu.vv v8, v11, v10 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v10 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vmv1r.v v10, v9 ; ZVBB-NEXT: vmv1r.v v11, v8 ; ZVBB-NEXT: li a0, 32 ; ZVBB-NEXT: vwsll.vx v8, v10, a0 ; ZVBB-NEXT: vwaddu.wv v8, v8, v11 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv4f32_nxv2f32: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZIP-NEXT: vmv1r.v v10, v9 ; ZIP-NEXT: vmv1r.v v11, v8 ; ZIP-NEXT: ri.vzip2b.vv v9, v8, v10 ; ZIP-NEXT: ri.vzip2a.vv v8, v11, v10 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv4f32( %a, %b) ret %res } define @vector_interleave_nxv16bf16_nxv8bf16( %a, %b) { ; V-LABEL: vector_interleave_nxv16bf16_nxv8bf16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; V-NEXT: vmv2r.v v12, v10 ; V-NEXT: vmv2r.v v14, v8 ; V-NEXT: vwaddu.vv v8, v14, v12 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v12 ; V-NEXT: ret ; ; ZVBB-LABEL: 
vector_interleave_nxv16bf16_nxv8bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVBB-NEXT: vmv2r.v v12, v10 ; ZVBB-NEXT: vmv2r.v v14, v8 ; ZVBB-NEXT: vwsll.vi v8, v12, 16 ; ZVBB-NEXT: vwaddu.wv v8, v8, v14 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv16bf16_nxv8bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZIP-NEXT: vmv2r.v v12, v10 ; ZIP-NEXT: vmv2r.v v14, v8 ; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 ; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv16bf16( %a, %b) ret %res } define @vector_interleave_nxv16f16_nxv8f16( %a, %b) { ; V-LABEL: vector_interleave_nxv16f16_nxv8f16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; V-NEXT: vmv2r.v v12, v10 ; V-NEXT: vmv2r.v v14, v8 ; V-NEXT: vwaddu.vv v8, v14, v12 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v12 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZVBB-NEXT: vmv2r.v v12, v10 ; ZVBB-NEXT: vmv2r.v v14, v8 ; ZVBB-NEXT: vwsll.vi v8, v12, 16 ; ZVBB-NEXT: vwaddu.wv v8, v8, v14 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv16f16_nxv8f16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; ZIP-NEXT: vmv2r.v v12, v10 ; ZIP-NEXT: vmv2r.v v14, v8 ; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 ; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv16f16( %a, %b) ret %res } define @vector_interleave_nxv8f32_nxv4f32( %a, %b) { ; V-LABEL: vector_interleave_nxv8f32_nxv4f32: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; V-NEXT: vmv2r.v v12, v10 ; V-NEXT: vmv2r.v v14, v8 ; V-NEXT: vwaddu.vv v8, v14, v12 ; V-NEXT: li a0, -1 ; V-NEXT: vwmaccu.vx v8, a0, v12 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; ZVBB-NEXT: vmv2r.v v12, v10 ; ZVBB-NEXT: vmv2r.v v14, v8 ; ZVBB-NEXT: li a0, 32 ; ZVBB-NEXT: vwsll.vx v8, v12, a0 ; ZVBB-NEXT: vwaddu.wv v8, v8, v14 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv8f32_nxv4f32: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; ZIP-NEXT: vmv2r.v v12, v10 ; ZIP-NEXT: vmv2r.v v14, v8 ; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 ; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv8f32( %a, %b) ret %res } define @vector_interleave_nxv4f64_nxv2f64( %a, %b) { ; V-LABEL: vector_interleave_nxv4f64_nxv2f64: ; V: # %bb.0: ; V-NEXT: csrr a0, vlenb ; V-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; V-NEXT: vid.v v12 ; V-NEXT: srli a0, a0, 2 ; V-NEXT: vand.vi v13, v12, 1 ; V-NEXT: vmsne.vi v0, v13, 0 ; V-NEXT: vsrl.vi v16, v12, 1 ; V-NEXT: vadd.vx v16, v16, a0, v0.t ; V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; V-NEXT: vrgatherei16.vv v12, v8, v16 ; V-NEXT: vmv.v.v v8, v12 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; ZVBB-NEXT: vid.v v12 ; ZVBB-NEXT: srli a0, a0, 2 ; ZVBB-NEXT: vand.vi v13, v12, 1 ; ZVBB-NEXT: vmsne.vi v0, v13, 0 ; ZVBB-NEXT: vsrl.vi v16, v12, 1 ; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t ; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16 ; ZVBB-NEXT: vmv.v.v v8, v12 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv4f64_nxv2f64: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; ZIP-NEXT: vmv2r.v v12, v10 ; ZIP-NEXT: vmv2r.v v14, v8 ; ZIP-NEXT: ri.vzip2b.vv v10, v8, v12 
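; Note: the three prefixes show three interleave2 lowerings. Plain V splices
; the halves arithmetically: vwaddu.vv widens a+b, then vwmaccu.vx with -1
; (i.e. 2^SEW-1) adds (2^SEW-1)*b, so b ends up scaled by 2^SEW in the high
; half of each widened element. Zvbb instead shifts b into the high half with
; vwsll and merges with vwaddu.wv, and XRivosVizip uses the dedicated
; ri.vzip2a/ri.vzip2b pair.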
; ZIP-NEXT: ri.vzip2a.vv v8, v14, v12 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv4f64( %a, %b) ret %res } define @vector_interleave_nxv64bf16_nxv32bf16( %a, %b) { ; V-LABEL: vector_interleave_nxv64bf16_nxv32bf16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; V-NEXT: vmv8r.v v24, v8 ; V-NEXT: vwaddu.vv v8, v24, v16 ; V-NEXT: li a0, -1 ; V-NEXT: vwaddu.vv v0, v28, v20 ; V-NEXT: vwmaccu.vx v8, a0, v16 ; V-NEXT: vwmaccu.vx v0, a0, v20 ; V-NEXT: vmv8r.v v16, v0 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVBB-NEXT: vwsll.vi v24, v16, 16 ; ZVBB-NEXT: vwsll.vi v0, v20, 16 ; ZVBB-NEXT: vwaddu.wv v24, v24, v8 ; ZVBB-NEXT: vwaddu.wv v0, v0, v12 ; ZVBB-NEXT: vmv8r.v v8, v24 ; ZVBB-NEXT: vmv8r.v v16, v0 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv64bf16_nxv32bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 ; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 ; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 ; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 ; ZIP-NEXT: vmv8r.v v8, v24 ; ZIP-NEXT: vmv8r.v v16, v0 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv64bf16( %a, %b) ret %res } define @vector_interleave_nxv64f16_nxv32f16( %a, %b) { ; V-LABEL: vector_interleave_nxv64f16_nxv32f16: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; V-NEXT: vmv8r.v v24, v8 ; V-NEXT: vwaddu.vv v8, v24, v16 ; V-NEXT: li a0, -1 ; V-NEXT: vwaddu.vv v0, v28, v20 ; V-NEXT: vwmaccu.vx v8, a0, v16 ; V-NEXT: vwmaccu.vx v0, a0, v20 ; V-NEXT: vmv8r.v v16, v0 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZVBB-NEXT: vwsll.vi v24, v16, 16 ; ZVBB-NEXT: vwsll.vi v0, v20, 16 ; ZVBB-NEXT: vwaddu.wv v24, v24, v8 ; ZVBB-NEXT: vwaddu.wv v0, v0, v12 ; ZVBB-NEXT: vmv8r.v v8, v24 ; ZVBB-NEXT: vmv8r.v v16, v0 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv64f16_nxv32f16: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 ; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 ; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 ; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 ; ZIP-NEXT: vmv8r.v v8, v24 ; ZIP-NEXT: vmv8r.v v16, v0 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv64f16( %a, %b) ret %res } define @vector_interleave_nxv32f32_nxv16f32( %a, %b) { ; V-LABEL: vector_interleave_nxv32f32_nxv16f32: ; V: # %bb.0: ; V-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; V-NEXT: vmv8r.v v24, v8 ; V-NEXT: vwaddu.vv v8, v24, v16 ; V-NEXT: li a0, -1 ; V-NEXT: vwaddu.vv v0, v28, v20 ; V-NEXT: vwmaccu.vx v8, a0, v16 ; V-NEXT: vwmaccu.vx v0, a0, v20 ; V-NEXT: vmv8r.v v16, v0 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: li a0, 32 ; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; ZVBB-NEXT: vwsll.vx v24, v16, a0 ; ZVBB-NEXT: vwsll.vx v0, v20, a0 ; ZVBB-NEXT: vwaddu.wv v24, v24, v8 ; ZVBB-NEXT: vwaddu.wv v0, v0, v12 ; ZVBB-NEXT: vmv8r.v v8, v24 ; ZVBB-NEXT: vmv8r.v v16, v0 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv32f32_nxv16f32: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 ; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 ; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 ; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 ; ZIP-NEXT: vmv8r.v v8, v24 ; ZIP-NEXT: vmv8r.v v16, v0 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv32f32( %a, %b) ret %res } define @vector_interleave_nxv16f64_nxv8f64( %a, %b) { ; 
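; Note: at e64 there is no wider element type to widen into, so the V and ZVBB
; paths instead build a gather index with vid.v (halve it, then add a
; vlenb-derived offset to the odd lanes under mask) and use vrgatherei16.vv,
; while XRivosVizip can still use ri.vzip2a/2b directly.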
V-LABEL: vector_interleave_nxv16f64_nxv8f64: ; V: # %bb.0: ; V-NEXT: csrr a0, vlenb ; V-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; V-NEXT: vid.v v6 ; V-NEXT: vmv8r.v v24, v8 ; V-NEXT: srli a0, a0, 1 ; V-NEXT: vmv4r.v v28, v16 ; V-NEXT: vmv4r.v v16, v12 ; V-NEXT: vand.vi v8, v6, 1 ; V-NEXT: vmsne.vi v0, v8, 0 ; V-NEXT: vsrl.vi v6, v6, 1 ; V-NEXT: vadd.vx v6, v6, a0, v0.t ; V-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; V-NEXT: vrgatherei16.vv v8, v24, v6 ; V-NEXT: vrgatherei16.vv v24, v16, v6 ; V-NEXT: vmv.v.v v16, v24 ; V-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu ; ZVBB-NEXT: vid.v v6 ; ZVBB-NEXT: vmv8r.v v24, v8 ; ZVBB-NEXT: srli a0, a0, 1 ; ZVBB-NEXT: vmv4r.v v28, v16 ; ZVBB-NEXT: vmv4r.v v16, v12 ; ZVBB-NEXT: vand.vi v8, v6, 1 ; ZVBB-NEXT: vmsne.vi v0, v8, 0 ; ZVBB-NEXT: vsrl.vi v6, v6, 1 ; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t ; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6 ; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6 ; ZVBB-NEXT: vmv.v.v v16, v24 ; ZVBB-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv16f64_nxv8f64: ; ZIP: # %bb.0: ; ZIP-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; ZIP-NEXT: ri.vzip2b.vv v28, v8, v16 ; ZIP-NEXT: ri.vzip2b.vv v4, v12, v20 ; ZIP-NEXT: ri.vzip2a.vv v24, v8, v16 ; ZIP-NEXT: ri.vzip2a.vv v0, v12, v20 ; ZIP-NEXT: vmv8r.v v8, v24 ; ZIP-NEXT: vmv8r.v v16, v0 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave2.nxv16f64( %a, %b) ret %res } define @vector_interleave_nxv6f16_nxv2f16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv6f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vle16.v v9, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: add a2, a3, a2 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv6f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vle16.v v9, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v9, a1 ; ZVBB-NEXT: add a2, a3, a2 ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv6f16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv12f16_nxv4f16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv12f16_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; 
CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v9, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv12f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v9, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v10, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv12f16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv24f16_nxv8f16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv24f16_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: vl2re16.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re16.v v10, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv24f16_nxv8f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: vl2re16.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re16.v v10, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re16.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv24f16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv6bf16_nxv2bf16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv6bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vle16.v v9, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: add a2, a3, a2 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; 
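; Note: factor-3 interleaves also round-trip through memory with vsseg3e16.v.
; For fractional-LMUL sources the three parts come back via vle16.v and the
; first two are fused into one register with vslideup.vx; for m1/m2 sources
; whole-register loads (vl1re16.v/vl2re16.v) suffice.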
CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv6bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vle16.v v9, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v9, a1 ; ZVBB-NEXT: add a2, a3, a2 ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv6bf16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv12bf16_nxv4bf16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv12bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v9, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v10, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v9, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v10, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv12bf16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv24bf16_nxv8bf16( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv24bf16_nxv8bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: vl2re16.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re16.v v10, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re16.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv8bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: vsetvli a2, zero, e16, m2, ta, ma ; ZVBB-NEXT: vsseg3e16.v v8, (a0) ; ZVBB-NEXT: 
vl2re16.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re16.v v10, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re16.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv24bf16( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv3f32_nxv1f32( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv3f32_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vle32.v v9, (a3) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: add a2, a3, a2 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv3f32_nxv1f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vsseg3e32.v v8, (a0) ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vle32.v v9, (a3) ; ZVBB-NEXT: vle32.v v8, (a0) ; ZVBB-NEXT: srli a1, a1, 3 ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v9, a1 ; ZVBB-NEXT: add a2, a3, a2 ; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv3f32( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv6f32_nxv2f32( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv6f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re32.v v9, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re32.v v10, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv6f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vsseg3e32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re32.v v9, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re32.v v10, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv6f32( 
%v0, %v1, %v2) ret %res } define @vector_interleave_nxv12f32_nxv4f32( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv12f32_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: vl2re32.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re32.v v10, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re32.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv12f32_nxv4f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: vsetvli a2, zero, e32, m2, ta, ma ; ZVBB-NEXT: vsseg3e32.v v8, (a0) ; ZVBB-NEXT: vl2re32.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re32.v v10, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re32.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv12f32( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv3f64_nxv1f64( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv3f64_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re64.v v9, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re64.v v10, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv3f64_nxv1f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; ZVBB-NEXT: vsseg3e64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re64.v v9, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re64.v v10, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv3f64( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv6f64_nxv2f64( %v0, %v1, %v2) nounwind { ; CHECK-LABEL: vector_interleave_nxv6f64_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: vl2re64.v v8, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: 
vl2re64.v v10, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl2re64.v v12, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv6f64_nxv2f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; ZVBB-NEXT: vsseg3e64.v v8, (a0) ; ZVBB-NEXT: vl2re64.v v8, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re64.v v10, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl2re64.v v12, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: li a1, 6 ; ZVBB-NEXT: mul a0, a0, a1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave3.nxv6f64( %v0, %v1, %v2) ret %res } define @vector_interleave_nxv8f16_nxv2f16( %v0, %v1, %v2, %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv8f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vsetvli a4, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a2, a4, a2 ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vsetvli a4, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a2, a4, a2 ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vle16.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave4.nxv8f16( %v0, %v1, %v2, %v3) ret %res } define @vector_interleave_nxv16f16_nxv4f16( %v0, %v1, %v2, %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv16f16_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a1, a3, a1 ; 
CHECK-NEXT: vl1re16.v v11, (a1) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a1) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave4.nxv16f16( %v0, %v1, %v2, %v3) ret %res } define @vector_interleave_nxv32f16_nxv8f16( %v0, %v1, %v2, %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv32f16_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vl2re16.v v12, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl2re16.v v14, (a1) ; CHECK-NEXT: vl2re16.v v8, (a0) ; CHECK-NEXT: vl2re16.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32f16_nxv8f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: vsetvli a3, zero, e16, m2, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vl2re16.v v12, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl2re16.v v14, (a1) ; ZVBB-NEXT: vl2re16.v v8, (a0) ; ZVBB-NEXT: vl2re16.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave4.nxv32f16( %v0, %v1, %v2, %v3) ret %res } define @vector_interleave_nxv8bf16_nxv2bf16( %v0, %v1, %v2, %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv8bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vsetvli a4, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a2, a4, a2 ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; 
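; Note: factor-4 follows the same scheme with vsseg4eEW.v; mf2 sources are
; reloaded as four half registers and packed pairwise with vslideup.vx, while
; m1 and m2 sources reload directly into v8..v11 and v8..v14.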
CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vsetvli a4, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a2, a4, a2 ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vle16.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 8 x bfloat> @llvm.vector.interleave4.nxv8bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3) ret <vscale x 8 x bfloat> %res } define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv16bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re16.v v11, (a1) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a1) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 16 x bfloat> @llvm.vector.interleave4.nxv16bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3) ret <vscale x 16 x bfloat> %res } define <vscale x 32 x bfloat> @vector_interleave_nxv32bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv32bf16_nxv8bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vl2re16.v v12, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl2re16.v v14, (a1) ; CHECK-NEXT: vl2re16.v v8, (a0) ; CHECK-NEXT: vl2re16.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32bf16_nxv8bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: 
csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: vsetvli a3, zero, e16, m2, ta, ma ; ZVBB-NEXT: vsseg4e16.v v8, (a0) ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vl2re16.v v12, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl2re16.v v14, (a1) ; ZVBB-NEXT: vl2re16.v v8, (a0) ; ZVBB-NEXT: vl2re16.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 32 x bfloat> @llvm.vector.interleave4.nxv32bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3) ret <vscale x 32 x bfloat> %res } define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv4f32_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: vsetvli a4, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a2, a4, a2 ; CHECK-NEXT: vle32.v v9, (a4) ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v10, (a3) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4f32_nxv1f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: vsetvli a4, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vsseg4e32.v v8, (a0) ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a2, a4, a2 ; ZVBB-NEXT: vle32.v v9, (a4) ; ZVBB-NEXT: vle32.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 3 ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v10, (a3) ; ZVBB-NEXT: vle32.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 1 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 4 x float> @llvm.vector.interleave4.nxv4f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3) ret <vscale x 4 x float> %res } define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv8f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v10, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re32.v v11, (a1) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; 
ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; ZVBB-NEXT: vsseg4e32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v10, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re32.v v11, (a1) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 8 x float> @llvm.vector.interleave4.nxv8f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3) ret <vscale x 8 x float> %res } define <vscale x 16 x float> @vector_interleave_nxv16f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv16f32_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: vsetvli a3, zero, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vl2re32.v v12, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl2re32.v v14, (a1) ; CHECK-NEXT: vl2re32.v v8, (a0) ; CHECK-NEXT: vl2re32.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f32_nxv4f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: vsetvli a3, zero, e32, m2, ta, ma ; ZVBB-NEXT: vsseg4e32.v v8, (a0) ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vl2re32.v v12, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl2re32.v v14, (a1) ; ZVBB-NEXT: vl2re32.v v8, (a0) ; ZVBB-NEXT: vl2re32.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 16 x float> @llvm.vector.interleave4.nxv16f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3) ret <vscale x 16 x float> %res } define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv4f64_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v10, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re64.v v11, (a1) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv4f64_nxv1f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; ZVBB-NEXT: vsseg4e64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v10, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re64.v v11, (a1) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 4 x double> 
@llvm.vector.interleave4.nxv4f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3) ret <vscale x 4 x double> %res } define <vscale x 8 x double> @vector_interleave_nxv8f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3) nounwind { ; CHECK-LABEL: vector_interleave_nxv8f64_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: vsetvli a3, zero, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vl2re64.v v12, (a3) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl2re64.v v14, (a1) ; CHECK-NEXT: vl2re64.v v8, (a0) ; CHECK-NEXT: vl2re64.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f64_nxv2f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 1 ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: vsetvli a3, zero, e64, m2, ta, ma ; ZVBB-NEXT: vsseg4e64.v v8, (a0) ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vl2re64.v v12, (a3) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl2re64.v v14, (a1) ; ZVBB-NEXT: vl2re64.v v8, (a0) ; ZVBB-NEXT: vl2re64.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 8 x double> @llvm.vector.interleave4.nxv8f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3) ret <vscale x 8 x double> %res } define <vscale x 10 x half> @vector_interleave_nxv10f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv10f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: vle16.v v8, (a5) ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vle16.v v10, (a3) ; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: add a2, a5, a2 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv10f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg5e16.v v8, (a0) ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: vle16.v v8, (a5) ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vle16.v v10, (a3) ; ZVBB-NEXT: vsetvli a3, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; ZVBB-NEXT: 
vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: add a2, a5, a2 ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 10 x half> @llvm.vector.interleave5.nxv10f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4) ret <vscale x 10 x half> %res } define <vscale x 20 x half> @vector_interleave_nxv20f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv20f16_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v11, (a3) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re16.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv20f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg5e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a3) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re16.v v12, (a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 20 x half> @llvm.vector.interleave5.nxv20f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4) ret <vscale x 20 x half> %res } define <vscale x 40 x half> @vector_interleave_nxv40f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) nounwind { ; RV32-LABEL: vector_interleave_nxv40f16_nxv8f16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 2 ; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v16, v8 ; RV32-NEXT: vmv2r.v v22, v16 ; RV32-NEXT: vmv2r.v v24, v18 ; RV32-NEXT: vmv1r.v v26, v20 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v23, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 ; RV32-NEXT: vsseg5e16.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 ; RV32-NEXT: vsseg5e16.v v17, (a1) ; RV32-NEXT: vl1re16.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; 
RV32-NEXT: vl1re16.v v11, (a6) ; RV32-NEXT: vl1re16.v v8, (a0) ; RV32-NEXT: vl1re16.v v9, (a3) ; RV32-NEXT: vl1re16.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v15, (a5) ; RV32-NEXT: vl1re16.v v12, (a6) ; RV32-NEXT: vl1re16.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re16.v v16, (a2) ; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv40f16_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 2 ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v16, v8 ; RV64-NEXT: vmv2r.v v22, v16 ; RV64-NEXT: vmv2r.v v24, v18 ; RV64-NEXT: vmv1r.v v26, v20 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v23, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 ; RV64-NEXT: vsseg5e16.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 ; RV64-NEXT: vsseg5e16.v v17, (a1) ; RV64-NEXT: vl1re16.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v17, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v11, (a6) ; RV64-NEXT: vl1re16.v v8, (a0) ; RV64-NEXT: vl1re16.v v9, (a3) ; RV64-NEXT: vl1re16.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v15, (a5) ; RV64-NEXT: vl1re16.v v12, (a6) ; RV64-NEXT: vl1re16.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re16.v v16, (a2) ; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv40f16_nxv8f16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 2 ; ZVBB-RV32-NEXT: add a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; 
ZVBB-RV32-NEXT: vmv2r.v v16, v8 ; ZVBB-RV32-NEXT: vmv2r.v v22, v16 ; ZVBB-RV32-NEXT: vmv2r.v v24, v18 ; ZVBB-RV32-NEXT: vmv1r.v v26, v20 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v23, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 ; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 ; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v15, (a5) ; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv40f16_nxv8f16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 2 ; ZVBB-RV64-NEXT: add a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v16, v8 ; ZVBB-RV64-NEXT: vmv2r.v v22, v16 ; ZVBB-RV64-NEXT: vmv2r.v v24, v18 ; ZVBB-RV64-NEXT: vmv1r.v v26, v20 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v23, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 ; ZVBB-RV64-NEXT: vsseg5e16.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 ; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v15, (a5) ; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; 
ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vs2r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv40f16_nxv8f16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v16 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v18, v12 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 2 ; ZIP-NEXT: add a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v16, v8 ; ZIP-NEXT: vmv2r.v v22, v16 ; ZIP-NEXT: vmv2r.v v24, v18 ; ZIP-NEXT: vmv1r.v v26, v20 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v23, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vmv1r.v v25, v14 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v18, v11 ; ZIP-NEXT: vsseg5e16.v v22, (a0) ; ZIP-NEXT: vmv1r.v v20, v15 ; ZIP-NEXT: vsseg5e16.v v17, (a1) ; ZIP-NEXT: vl1re16.v v16, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v17, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v11, (a6) ; ZIP-NEXT: vl1re16.v v8, (a0) ; ZIP-NEXT: vl1re16.v v9, (a3) ; ZIP-NEXT: vl1re16.v v14, (a4) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 10 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v15, (a5) ; ZIP-NEXT: vl1re16.v v12, (a6) ; ZIP-NEXT: vl1re16.v v13, (a1) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vs2r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re16.v v16, (a2) ; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 40 x half> @llvm.vector.interleave5.nxv40f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4) ret <vscale x 40 x half> %res } define <vscale x 10 x bfloat> @vector_interleave_nxv10bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv10bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: vle16.v v8, (a5) ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vle16.v v10, (a3) ; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: add a2, a5, a2 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; 
CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv10bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg5e16.v v8, (a0) ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: vle16.v v8, (a5) ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vle16.v v10, (a3) ; ZVBB-NEXT: vsetvli a3, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: add a2, a5, a2 ; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 10 x bfloat> @llvm.vector.interleave5.nxv10bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4) ret <vscale x 10 x bfloat> %res } define <vscale x 20 x bfloat> @vector_interleave_nxv20bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv20bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v11, (a3) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re16.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv20bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg5e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a3) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re16.v v12, (a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 20 x bfloat> @llvm.vector.interleave5.nxv20bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4) ret <vscale x 20 x bfloat> %res } define <vscale x 40 x bfloat> @vector_interleave_nxv40bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) nounwind { ; RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, 
sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 2 ; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v16, v8 ; RV32-NEXT: vmv2r.v v22, v16 ; RV32-NEXT: vmv2r.v v24, v18 ; RV32-NEXT: vmv1r.v v26, v20 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v23, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 ; RV32-NEXT: vsseg5e16.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 ; RV32-NEXT: vsseg5e16.v v17, (a1) ; RV32-NEXT: vl1re16.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v11, (a6) ; RV32-NEXT: vl1re16.v v8, (a0) ; RV32-NEXT: vl1re16.v v9, (a3) ; RV32-NEXT: vl1re16.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v15, (a5) ; RV32-NEXT: vl1re16.v v12, (a6) ; RV32-NEXT: vl1re16.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re16.v v16, (a2) ; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 2 ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v16, v8 ; RV64-NEXT: vmv2r.v v22, v16 ; RV64-NEXT: vmv2r.v v24, v18 ; RV64-NEXT: vmv1r.v v26, v20 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v23, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 ; RV64-NEXT: vsseg5e16.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 ; RV64-NEXT: vsseg5e16.v v17, (a1) ; RV64-NEXT: vl1re16.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v17, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v11, (a6) ; RV64-NEXT: vl1re16.v v8, (a0) ; RV64-NEXT: vl1re16.v v9, (a3) ; RV64-NEXT: vl1re16.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v15, (a5) ; RV64-NEXT: vl1re16.v v12, (a6) ; RV64-NEXT: vl1re16.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re16.v v16, (a2) ; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi 
sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 2 ; ZVBB-RV32-NEXT: add a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v16, v8 ; ZVBB-RV32-NEXT: vmv2r.v v22, v16 ; ZVBB-RV32-NEXT: vmv2r.v v24, v18 ; ZVBB-RV32-NEXT: vmv1r.v v26, v20 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v23, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 ; ZVBB-RV32-NEXT: vsseg5e16.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 ; ZVBB-RV32-NEXT: vsseg5e16.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v15, (a5) ; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 2 ; ZVBB-RV64-NEXT: add a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v16, v8 ; ZVBB-RV64-NEXT: vmv2r.v v22, v16 ; ZVBB-RV64-NEXT: vmv2r.v v24, v18 ; ZVBB-RV64-NEXT: vmv1r.v v26, v20 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v23, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 ; ZVBB-RV64-NEXT: 
vsseg5e16.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 ; ZVBB-RV64-NEXT: vsseg5e16.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re16.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re16.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v15, (a5) ; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vs2r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv40bf16_nxv8bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v16 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v18, v12 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 2 ; ZIP-NEXT: add a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v16, v8 ; ZIP-NEXT: vmv2r.v v22, v16 ; ZIP-NEXT: vmv2r.v v24, v18 ; ZIP-NEXT: vmv1r.v v26, v20 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v23, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vmv1r.v v25, v14 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v18, v11 ; ZIP-NEXT: vsseg5e16.v v22, (a0) ; ZIP-NEXT: vmv1r.v v20, v15 ; ZIP-NEXT: vsseg5e16.v v17, (a1) ; ZIP-NEXT: vl1re16.v v16, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v17, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v11, (a6) ; ZIP-NEXT: vl1re16.v v8, (a0) ; ZIP-NEXT: vl1re16.v v9, (a3) ; ZIP-NEXT: vl1re16.v v14, (a4) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 10 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v15, (a5) ; ZIP-NEXT: vl1re16.v v12, (a6) ; ZIP-NEXT: vl1re16.v v13, (a1) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vs2r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re16.v v16, (a2) ; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 40 x bfloat> @llvm.vector.interleave5.nxv40bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4) ret <vscale x 40 x bfloat> %res } define <vscale x 5 x float> @vector_interleave_nxv5f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv5f32_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; 
CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: vle32.v v8, (a5) ; CHECK-NEXT: vle32.v v9, (a4) ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: vle32.v v10, (a3) ; CHECK-NEXT: vsetvli a3, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: add a2, a5, a2 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v10, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv5f32_nxv1f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vsseg5e32.v v8, (a0) ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: vle32.v v8, (a5) ; ZVBB-NEXT: vle32.v v9, (a4) ; ZVBB-NEXT: srli a1, a1, 3 ; ZVBB-NEXT: vle32.v v10, (a3) ; ZVBB-NEXT: vsetvli a3, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v10, a1 ; ZVBB-NEXT: add a2, a5, a2 ; ZVBB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v10, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 1 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 5 x float> @llvm.vector.interleave5.nxv5f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4) ret <vscale x 5 x float> %res } define <vscale x 10 x float> @vector_interleave_nxv10f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv10f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re32.v v11, (a3) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re32.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv10f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; ZVBB-NEXT: vsseg5e32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re32.v v11, (a3) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re32.v v12, 
(a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 10 x float> @llvm.vector.interleave5.nxv10f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4) ret <vscale x 10 x float> %res } define <vscale x 20 x float> @vector_interleave_nxv20f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) nounwind { ; RV32-LABEL: vector_interleave_nxv20f32_nxv4f32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 2 ; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v16, v8 ; RV32-NEXT: vmv2r.v v22, v16 ; RV32-NEXT: vmv2r.v v24, v18 ; RV32-NEXT: vmv1r.v v26, v20 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v23, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 ; RV32-NEXT: vsseg5e32.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 ; RV32-NEXT: vsseg5e32.v v17, (a1) ; RV32-NEXT: vl1re32.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re32.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v11, (a6) ; RV32-NEXT: vl1re32.v v8, (a0) ; RV32-NEXT: vl1re32.v v9, (a3) ; RV32-NEXT: vl1re32.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v15, (a5) ; RV32-NEXT: vl1re32.v v12, (a6) ; RV32-NEXT: vl1re32.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re32.v v16, (a2) ; RV32-NEXT: vl8re32.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv20f32_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 2 ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v16, v8 ; RV64-NEXT: vmv2r.v v22, v16 ; RV64-NEXT: vmv2r.v v24, v18 ; RV64-NEXT: vmv1r.v v26, v20 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v23, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 ; RV64-NEXT: vsseg5e32.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 ; RV64-NEXT: vsseg5e32.v v17, (a1) ; RV64-NEXT: vl1re32.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v17, (a6) ; RV64-NEXT: add a6, a3, 
a2 ; RV64-NEXT: vl1re32.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v11, (a6) ; RV64-NEXT: vl1re32.v v8, (a0) ; RV64-NEXT: vl1re32.v v9, (a3) ; RV64-NEXT: vl1re32.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v15, (a5) ; RV64-NEXT: vl1re32.v v12, (a6) ; RV64-NEXT: vl1re32.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re32.v v16, (a2) ; RV64-NEXT: vl8re32.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv20f32_nxv4f32: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 2 ; ZVBB-RV32-NEXT: add a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v16, v8 ; ZVBB-RV32-NEXT: vmv2r.v v22, v16 ; ZVBB-RV32-NEXT: vmv2r.v v24, v18 ; ZVBB-RV32-NEXT: vmv1r.v v26, v20 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v23, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 ; ZVBB-RV32-NEXT: vsseg5e32.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 ; ZVBB-RV32-NEXT: vsseg5e32.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re32.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re32.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v15, (a5) ; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv20f32_nxv4f32: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; 
ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 2 ; ZVBB-RV64-NEXT: add a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v16, v8 ; ZVBB-RV64-NEXT: vmv2r.v v22, v16 ; ZVBB-RV64-NEXT: vmv2r.v v24, v18 ; ZVBB-RV64-NEXT: vmv1r.v v26, v20 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v23, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 ; ZVBB-RV64-NEXT: vsseg5e32.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 ; ZVBB-RV64-NEXT: vsseg5e32.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re32.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re32.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v15, (a5) ; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vs2r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv20f32_nxv4f32: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a1, 28 ; ZIP-NEXT: mul a0, a0, a1 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZIP-NEXT: vmv2r.v v20, v16 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v18, v12 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 2 ; ZIP-NEXT: add a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v16, v8 ; ZIP-NEXT: vmv2r.v v22, v16 ; ZIP-NEXT: vmv2r.v v24, v18 ; ZIP-NEXT: vmv1r.v v26, v20 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v23, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vmv1r.v v25, v14 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v18, v11 ; ZIP-NEXT: vsseg5e32.v v22, (a0) ; ZIP-NEXT: vmv1r.v v20, v15 ; ZIP-NEXT: vsseg5e32.v v17, (a1) ; ZIP-NEXT: vl1re32.v v16, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v17, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re32.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v11, (a6) ; ZIP-NEXT: vl1re32.v v8, (a0) ; ZIP-NEXT: vl1re32.v v9, (a3) ; ZIP-NEXT: vl1re32.v v14, (a4) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 10 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v15, (a5) ; ZIP-NEXT: vl1re32.v v12, (a6) ; ZIP-NEXT: 
vl1re32.v v13, (a1) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vs2r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re32.v v16, (a2) ; ZIP-NEXT: vl8re32.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 20 x float> @llvm.vector.interleave5.nxv20f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4) ret <vscale x 20 x float> %res } define <vscale x 5 x double> @vector_interleave_nxv5f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) nounwind { ; CHECK-LABEL: vector_interleave_nxv5f64_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re64.v v11, (a3) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: add a1, a3, a1 ; CHECK-NEXT: vl1re64.v v12, (a1) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 2 ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv5f64_nxv1f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; ZVBB-NEXT: vsseg5e64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re64.v v11, (a3) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: add a1, a3, a1 ; ZVBB-NEXT: vl1re64.v v12, (a1) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 2 ; ZVBB-NEXT: add a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 5 x double> @llvm.vector.interleave5.nxv5f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4) ret <vscale x 5 x double> %res } define <vscale x 10 x double> @vector_interleave_nxv10f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4) nounwind { ; RV32-LABEL: vector_interleave_nxv10f64_nxv2f64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 28 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vmv2r.v v20, v16 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v18, v12 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 2 ; RV32-NEXT: add a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v16, v8 ; RV32-NEXT: vmv2r.v v22, v16 ; RV32-NEXT: vmv2r.v v24, v18 ; RV32-NEXT: vmv1r.v v26, v20 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v23, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: add a5, a4, a2 ; RV32-NEXT: vmv1r.v v25, v14 ; RV32-NEXT: add a6, a5, a2 ; RV32-NEXT: vmv1r.v v18, v11 ; RV32-NEXT: vsseg5e64.v v22, (a0) ; RV32-NEXT: vmv1r.v v20, v15 ; RV32-NEXT: vsseg5e64.v v17, (a1) ; RV32-NEXT: vl1re64.v v16, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v17, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re64.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; 
RV32-NEXT: vl1re64.v v11, (a6) ; RV32-NEXT: vl1re64.v v8, (a0) ; RV32-NEXT: vl1re64.v v9, (a3) ; RV32-NEXT: vl1re64.v v14, (a4) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 10 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v15, (a5) ; RV32-NEXT: vl1re64.v v12, (a6) ; RV32-NEXT: vl1re64.v v13, (a1) ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vs2r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re64.v v16, (a2) ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv10f64_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 28 ; RV64-NEXT: mul a0, a0, a1 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv2r.v v20, v16 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v18, v12 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 2 ; RV64-NEXT: add a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v16, v8 ; RV64-NEXT: vmv2r.v v22, v16 ; RV64-NEXT: vmv2r.v v24, v18 ; RV64-NEXT: vmv1r.v v26, v20 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v23, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: add a5, a4, a2 ; RV64-NEXT: vmv1r.v v25, v14 ; RV64-NEXT: add a6, a5, a2 ; RV64-NEXT: vmv1r.v v18, v11 ; RV64-NEXT: vsseg5e64.v v22, (a0) ; RV64-NEXT: vmv1r.v v20, v15 ; RV64-NEXT: vsseg5e64.v v17, (a1) ; RV64-NEXT: vl1re64.v v16, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v17, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re64.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v11, (a6) ; RV64-NEXT: vl1re64.v v8, (a0) ; RV64-NEXT: vl1re64.v v9, (a3) ; RV64-NEXT: vl1re64.v v14, (a4) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 10 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v15, (a5) ; RV64-NEXT: vl1re64.v v12, (a6) ; RV64-NEXT: vl1re64.v v13, (a1) ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vs2r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re64.v v16, (a2) ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv10f64_nxv2f64: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a1, 28 ; ZVBB-RV32-NEXT: mul a0, a0, a1 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v20, v16 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v18, v12 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 2 ; ZVBB-RV32-NEXT: add a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; 
ZVBB-RV32-NEXT: vmv2r.v v16, v8 ; ZVBB-RV32-NEXT: vmv2r.v v22, v16 ; ZVBB-RV32-NEXT: vmv2r.v v24, v18 ; ZVBB-RV32-NEXT: vmv1r.v v26, v20 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v23, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: add a5, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v25, v14 ; ZVBB-RV32-NEXT: add a6, a5, a2 ; ZVBB-RV32-NEXT: vmv1r.v v18, v11 ; ZVBB-RV32-NEXT: vsseg5e64.v v22, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v20, v15 ; ZVBB-RV32-NEXT: vsseg5e64.v v17, (a1) ; ZVBB-RV32-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v17, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re64.v v14, (a4) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 10 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v13, (a1) ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vs2r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv10f64_nxv2f64: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a1, 28 ; ZVBB-RV64-NEXT: mul a0, a0, a1 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v20, v16 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v18, v12 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 2 ; ZVBB-RV64-NEXT: add a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v16, v8 ; ZVBB-RV64-NEXT: vmv2r.v v22, v16 ; ZVBB-RV64-NEXT: vmv2r.v v24, v18 ; ZVBB-RV64-NEXT: vmv1r.v v26, v20 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v23, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: add a5, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v25, v14 ; ZVBB-RV64-NEXT: add a6, a5, a2 ; ZVBB-RV64-NEXT: vmv1r.v v18, v11 ; ZVBB-RV64-NEXT: vsseg5e64.v v22, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v20, v15 ; ZVBB-RV64-NEXT: vsseg5e64.v v17, (a1) ; ZVBB-RV64-NEXT: vl1re64.v v16, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v17, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re64.v v14, (a4) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 10 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v15, (a5) ; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v13, (a1) ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; 
; ZVBB-RV64-NEXT: add a2, a0, a2
; ZVBB-RV64-NEXT: vs2r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a0)
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a0)
; ZVBB-RV64-NEXT: addi sp, s0, -80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv10f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT: addi s0, sp, 80
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v16
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv2r.v v18, v12
; ZIP-NEXT: csrr a1, vlenb
; ZIP-NEXT: slli a2, a1, 2
; ZIP-NEXT: add a1, a2, a1
; ZIP-NEXT: add a1, sp, a1
; ZIP-NEXT: addi a1, a1, 64
; ZIP-NEXT: csrr a2, vlenb
; ZIP-NEXT: vmv2r.v v16, v8
; ZIP-NEXT: vmv2r.v v22, v16
; ZIP-NEXT: vmv2r.v v24, v18
; ZIP-NEXT: vmv1r.v v26, v20
; ZIP-NEXT: add a3, a0, a2
; ZIP-NEXT: vmv1r.v v23, v10
; ZIP-NEXT: add a4, a1, a2
; ZIP-NEXT: add a5, a4, a2
; ZIP-NEXT: vmv1r.v v25, v14
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vmv1r.v v18, v11
; ZIP-NEXT: vsseg5e64.v v22, (a0)
; ZIP-NEXT: vmv1r.v v20, v15
; ZIP-NEXT: vsseg5e64.v v17, (a1)
; ZIP-NEXT: vl1re64.v v16, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re64.v v17, (a6)
; ZIP-NEXT: add a6, a3, a2
; ZIP-NEXT: vl1re64.v v10, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re64.v v11, (a6)
; ZIP-NEXT: vl1re64.v v8, (a0)
; ZIP-NEXT: vl1re64.v v9, (a3)
; ZIP-NEXT: vl1re64.v v14, (a4)
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a3, 10
; ZIP-NEXT: mul a0, a0, a3
; ZIP-NEXT: add a0, sp, a0
; ZIP-NEXT: addi a0, a0, 64
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re64.v v15, (a5)
; ZIP-NEXT: vl1re64.v v12, (a6)
; ZIP-NEXT: vl1re64.v v13, (a1)
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a0, a2
; ZIP-NEXT: vs2r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a0)
; ZIP-NEXT: vl8re64.v v16, (a2)
; ZIP-NEXT: vl8re64.v v8, (a0)
; ZIP-NEXT: addi sp, s0, -80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZIP-NEXT: addi sp, sp, 80
; ZIP-NEXT: ret
  %res = call <vscale x 10 x double> @llvm.vector.interleave5.nxv10f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4)
  ret <vscale x 10 x double> %res
}

; interleave6 with fractional (mf2) sources: the six pieces are spilled with
; vsseg6e16 and each m1 result is stitched from two mf2 reloads with vslideup.
define <vscale x 12 x half> @vector_interleave_nxv12f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv12f16_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: add a3, a0, a2
; CHECK-NEXT: add a4, a3, a2
; CHECK-NEXT: add a5, a4, a2
; CHECK-NEXT: vsetvli a6, zero, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: add a6, a5, a2
; CHECK-NEXT: add a2, a6, a2
; CHECK-NEXT: vle16.v v10, (a6)
; CHECK-NEXT: vle16.v v8, (a2)
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: vle16.v v11, (a5)
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v8, a1
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a4)
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a1
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v11, (a3)
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12f16_nxv2f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
; ZVBB-NEXT: add a3, a0, a2
; ZVBB-NEXT: add a4, a3, a2
; ZVBB-NEXT: add a5, a4, a2
; ZVBB-NEXT: vsetvli a6, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-NEXT: add a6, a5, a2
; ZVBB-NEXT: add a2, a6, a2
; ZVBB-NEXT: vle16.v v10, (a6)
; ZVBB-NEXT: vle16.v v8, (a2)
; ZVBB-NEXT: srli a1, a1, 2
; ZVBB-NEXT: vle16.v v11, (a5)
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v10, v8, a1
; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vle16.v v9, (a4)
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v9, v11, a1
; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vle16.v v11, (a3)
; ZVBB-NEXT: vle16.v v8, (a0)
; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v8, v11, a1
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 12 x half> @llvm.vector.interleave6.nxv12f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5)
  ret <vscale x 12 x half> %res
}

; interleave6 with whole-register (m1) sources: one vsseg6e16 through the
; stack, then six vl1re16 whole-register reloads straight into v8-v13.
define <vscale x 24 x half> @vector_interleave_nxv24f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv24f16_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: vl1re16.v v10, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re16.v v11, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re16.v v8, (a0)
; CHECK-NEXT: vl1re16.v v9, (a2)
; CHECK-NEXT: vl1re16.v v12, (a3)
; CHECK-NEXT: add a1, a3, a1
; CHECK-NEXT: vl1re16.v v13, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv24f16_nxv4f16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
; ZVBB-NEXT: add a3, a2, a1
; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
; ZVBB-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-NEXT: vl1re16.v v10, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re16.v v11, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re16.v v8, (a0)
; ZVBB-NEXT: vl1re16.v v9, (a2)
; ZVBB-NEXT: vl1re16.v v12, (a3)
; ZVBB-NEXT: add a1, a3, a1
; ZVBB-NEXT: vl1re16.v v13, (a1)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 24 x half> @llvm.vector.interleave6.nxv24f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5)
  ret <vscale x 24 x half> %res
}

; interleave6 with m2 sources: the operands are split into m1 halves for two
; vsseg6e16 stores, and the halves are recombined through a second,
; 64-byte-aligned stack slot with vs4r/vs8r before the final vl8re16 reloads.
define <vscale x 48 x half> @vector_interleave_nxv48f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5) nounwind {
; RV32-LABEL: vector_interleave_nxv48f16_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vmv2r.v v20, v14
; RV32-NEXT: vmv2r.v v22, v12
; RV32-NEXT: vmv2r.v v24, v10
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a0, 6
; RV32-NEXT: mul a1, a1, a0
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 64
; RV32-NEXT: vmv1r.v v10, v25
; RV32-NEXT: vmv1r.v v11, v23
; RV32-NEXT: vmv1r.v v12, v21
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv1r.v v13, v17
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: vmv1r.v v14, v19
; RV32-NEXT: vsseg6e16.v v9, (a1)
; RV32-NEXT: vmv1r.v v9, v24
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vmv1r.v v10, v22
; RV32-NEXT: add a3, a0, a2
; RV32-NEXT: vmv1r.v v11, v20
; RV32-NEXT: add a4, a3, a2
; RV32-NEXT: vmv1r.v v12, v16
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vmv1r.v v13, v18
; RV32-NEXT: vsseg6e16.v v8, (a0)
; RV32-NEXT: vl1re16.v v14, (a1)
; RV32-NEXT: add a1, a6, a2
; RV32-NEXT: vl1re16.v v15, (a5)
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vl1re16.v v18, (a5)
; RV32-NEXT: add a5, a5, a2
; RV32-NEXT: vl1re16.v v19, (a5)
; RV32-NEXT: add a5, a4, a2
; RV32-NEXT: vl1re16.v v16, (a6)
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vl1re16.v v12, (a6)
; RV32-NEXT: add a6, a6, a2
; RV32-NEXT: vl1re16.v v13, (a6)
; RV32-NEXT: csrr a6, vlenb
; RV32-NEXT: li a7, 12
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 64
; RV32-NEXT: vl1re16.v v17, (a1)
; RV32-NEXT: vl1re16.v v10, (a4)
; RV32-NEXT: vl1re16.v v11, (a5)
; RV32-NEXT: vl1re16.v v8, (a0)
; RV32-NEXT: vl1re16.v v9, (a3)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a6, a2
; RV32-NEXT: vs4r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a6)
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a6)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv48f16_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 80
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vmv2r.v v20, v14
; RV64-NEXT: vmv2r.v v22, v12
; RV64-NEXT: vmv2r.v v24, v10
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a0, 6
; RV64-NEXT: mul a1, a1, a0
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 64
; RV64-NEXT: vmv1r.v v10, v25
; RV64-NEXT: vmv1r.v v11, v23
; RV64-NEXT: vmv1r.v v12, v21
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv1r.v v13, v17
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: vmv1r.v v14, v19
; RV64-NEXT: vsseg6e16.v v9, (a1)
; RV64-NEXT: vmv1r.v v9, v24
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vmv1r.v v10, v22
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: vmv1r.v v11, v20
; RV64-NEXT: add a4, a3, a2
; RV64-NEXT: vmv1r.v v12, v16
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vmv1r.v v13, v18
; RV64-NEXT: vsseg6e16.v v8, (a0)
; RV64-NEXT: vl1re16.v v14, (a1)
; RV64-NEXT: add a1, a6, a2
; RV64-NEXT: vl1re16.v v15, (a5)
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vl1re16.v v18, (a5)
; RV64-NEXT: add a5, a5, a2
; RV64-NEXT: vl1re16.v v19, (a5)
; RV64-NEXT: add a5, a4, a2
; RV64-NEXT: vl1re16.v v16, (a6)
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vl1re16.v v12, (a6)
; RV64-NEXT: add a6, a6, a2
; RV64-NEXT: vl1re16.v v13, (a6)
; RV64-NEXT: csrr a6, vlenb
; RV64-NEXT: li a7, 12
; RV64-NEXT: mul a6, a6, a7
; RV64-NEXT: add a6, sp, a6
; RV64-NEXT: addi a6, a6, 64
; RV64-NEXT: vl1re16.v v17, (a1)
; RV64-NEXT: vl1re16.v v10, (a4)
; RV64-NEXT: vl1re16.v v11, (a5)
; RV64-NEXT: vl1re16.v v8, (a0)
; RV64-NEXT: vl1re16.v v9, (a3)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a6, a2
; RV64-NEXT: vs4r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a6)
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a6)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv48f16_nxv8f16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: addi s0, sp, 80
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v20, v14
; ZVBB-RV32-NEXT: vmv2r.v v22, v12
; ZVBB-RV32-NEXT: vmv2r.v v24, v10
; ZVBB-RV32-NEXT: csrr a1, vlenb
; ZVBB-RV32-NEXT: li a0, 6
; ZVBB-RV32-NEXT: mul a1, a1, a0
; ZVBB-RV32-NEXT: add a1, sp, a1
; ZVBB-RV32-NEXT: addi a1, a1, 64
; ZVBB-RV32-NEXT: vmv1r.v v10, v25
; ZVBB-RV32-NEXT: vmv1r.v v11, v23
; ZVBB-RV32-NEXT: vmv1r.v v12, v21
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv1r.v v13, v17
; ZVBB-RV32-NEXT: csrr a2, vlenb
; ZVBB-RV32-NEXT: vmv1r.v v14, v19
; ZVBB-RV32-NEXT: vsseg6e16.v v9, (a1)
; ZVBB-RV32-NEXT: vmv1r.v v9, v24
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vmv1r.v v10, v22
; ZVBB-RV32-NEXT: add a3, a0, a2
; ZVBB-RV32-NEXT: vmv1r.v v11, v20
; ZVBB-RV32-NEXT: add a4, a3, a2
; ZVBB-RV32-NEXT: vmv1r.v v12, v16
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vmv1r.v v13, v18
; ZVBB-RV32-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re16.v v14, (a1)
; ZVBB-RV32-NEXT: add a1, a6, a2
; ZVBB-RV32-NEXT: vl1re16.v v15, (a5)
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vl1re16.v v18, (a5)
; ZVBB-RV32-NEXT: add a5, a5, a2
; ZVBB-RV32-NEXT: vl1re16.v v19, (a5)
; ZVBB-RV32-NEXT: add a5, a4, a2
; ZVBB-RV32-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
; ZVBB-RV32-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV32-NEXT: csrr a6, vlenb
; ZVBB-RV32-NEXT: li a7, 12
; ZVBB-RV32-NEXT: mul a6, a6, a7
; ZVBB-RV32-NEXT: add a6, sp, a6
; ZVBB-RV32-NEXT: addi a6, a6, 64
; ZVBB-RV32-NEXT: vl1re16.v v17, (a1)
; ZVBB-RV32-NEXT: vl1re16.v v10, (a4)
; ZVBB-RV32-NEXT: vl1re16.v v11, (a5)
; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a6, a2
; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a6)
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a6)
; ZVBB-RV32-NEXT: addi sp, s0, -80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: addi sp, sp, 80
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv48f16_nxv8f16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: addi s0, sp, 80
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v20, v14
; ZVBB-RV64-NEXT: vmv2r.v v22, v12
; ZVBB-RV64-NEXT: vmv2r.v v24, v10
; ZVBB-RV64-NEXT: csrr a1, vlenb
; ZVBB-RV64-NEXT: li a0, 6
; ZVBB-RV64-NEXT: mul a1, a1, a0
; ZVBB-RV64-NEXT: add a1, sp, a1
; ZVBB-RV64-NEXT: addi a1, a1, 64
; ZVBB-RV64-NEXT: vmv1r.v v10, v25
; ZVBB-RV64-NEXT: vmv1r.v v11, v23
; ZVBB-RV64-NEXT: vmv1r.v v12, v21
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv1r.v v13, v17
; ZVBB-RV64-NEXT: csrr a2, vlenb
; ZVBB-RV64-NEXT: vmv1r.v v14, v19
; ZVBB-RV64-NEXT: vsseg6e16.v v9, (a1)
; ZVBB-RV64-NEXT: vmv1r.v v9, v24
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vmv1r.v v10, v22
; ZVBB-RV64-NEXT: add a3, a0, a2
; ZVBB-RV64-NEXT: vmv1r.v v11, v20
; ZVBB-RV64-NEXT: add a4, a3, a2
; ZVBB-RV64-NEXT: vmv1r.v v12, v16
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vmv1r.v v13, v18
; ZVBB-RV64-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re16.v v14, (a1)
; ZVBB-RV64-NEXT: add a1, a6, a2
; ZVBB-RV64-NEXT: vl1re16.v v15, (a5)
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vl1re16.v v18, (a5)
; ZVBB-RV64-NEXT: add a5, a5, a2
; ZVBB-RV64-NEXT: vl1re16.v v19, (a5)
; ZVBB-RV64-NEXT: add a5, a4, a2
; ZVBB-RV64-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
; ZVBB-RV64-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV64-NEXT: csrr a6, vlenb
; ZVBB-RV64-NEXT: li a7, 12
; ZVBB-RV64-NEXT: mul a6, a6, a7
; ZVBB-RV64-NEXT: add a6, sp, a6
; ZVBB-RV64-NEXT: addi a6, a6, 64
; ZVBB-RV64-NEXT: vl1re16.v v17, (a1)
; ZVBB-RV64-NEXT: vl1re16.v v10, (a4)
; ZVBB-RV64-NEXT: vl1re16.v v11, (a5)
; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a6, a2
; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a6)
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a6)
; ZVBB-RV64-NEXT: addi sp, s0, -80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv48f16_nxv8f16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT: addi s0, sp, 80
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v14
; ZIP-NEXT: vmv2r.v v22, v12
; ZIP-NEXT: vmv2r.v v24, v10
; ZIP-NEXT: csrr a1, vlenb
; ZIP-NEXT: li a0, 6
; ZIP-NEXT: mul a1, a1, a0
; ZIP-NEXT: add a1, sp, a1
; ZIP-NEXT: addi a1, a1, 64
; ZIP-NEXT: vmv1r.v v10, v25
; ZIP-NEXT: vmv1r.v v11, v23
; ZIP-NEXT: vmv1r.v v12, v21
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv1r.v v13, v17
; ZIP-NEXT: csrr a2, vlenb
; ZIP-NEXT: vmv1r.v v14, v19
; ZIP-NEXT: vsseg6e16.v v9, (a1)
; ZIP-NEXT: vmv1r.v v9, v24
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vmv1r.v v10, v22
; ZIP-NEXT: add a3, a0, a2
; ZIP-NEXT: vmv1r.v v11, v20
; ZIP-NEXT: add a4, a3, a2
; ZIP-NEXT: vmv1r.v v12, v16
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vmv1r.v v13, v18
; ZIP-NEXT: vsseg6e16.v v8, (a0)
; ZIP-NEXT: vl1re16.v v14, (a1)
; ZIP-NEXT: add a1, a6, a2
; ZIP-NEXT: vl1re16.v v15, (a5)
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vl1re16.v v18, (a5)
; ZIP-NEXT: add a5, a5, a2
; ZIP-NEXT: vl1re16.v v19, (a5)
; ZIP-NEXT: add a5, a4, a2
; ZIP-NEXT: vl1re16.v v16, (a6)
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vl1re16.v v12, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re16.v v13, (a6)
; ZIP-NEXT: csrr a6, vlenb
; ZIP-NEXT: li a7, 12
; ZIP-NEXT: mul a6, a6, a7
; ZIP-NEXT: add a6, sp, a6
; ZIP-NEXT: addi a6, a6, 64
; ZIP-NEXT: vl1re16.v v17, (a1)
; ZIP-NEXT: vl1re16.v v10, (a4)
; ZIP-NEXT: vl1re16.v v11, (a5)
; ZIP-NEXT: vl1re16.v v8, (a0)
; ZIP-NEXT: vl1re16.v v9, (a3)
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a6, a2
; ZIP-NEXT: vs4r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a6)
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a6)
; ZIP-NEXT: addi sp, s0, -80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZIP-NEXT: addi sp, sp, 80
; ZIP-NEXT: ret
  %res = call <vscale x 48 x half> @llvm.vector.interleave6.nxv48f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5)
  ret <vscale x 48 x half> %res
}

; The bf16 cases below lower exactly like the corresponding f16 cases; only
; the element type differs.
define <vscale x 12 x bfloat> @vector_interleave_nxv12bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv12bf16_nxv2bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: add a3, a0, a2
; CHECK-NEXT: add a4, a3, a2
; CHECK-NEXT: add a5, a4, a2
; CHECK-NEXT: vsetvli a6, zero, e16, mf2, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: add a6, a5, a2
; CHECK-NEXT: add a2, a6, a2
; CHECK-NEXT: vle16.v v10, (a6)
; CHECK-NEXT: vle16.v v8, (a2)
; CHECK-NEXT: srli a1, a1, 2
; CHECK-NEXT: vle16.v v11, (a5)
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v8, a1
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a4)
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a1
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v11, (a3)
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12bf16_nxv2bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
; ZVBB-NEXT: add a3, a0, a2
; ZVBB-NEXT: add a4, a3, a2
; ZVBB-NEXT: add a5, a4, a2
; ZVBB-NEXT: vsetvli a6, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-NEXT: add a6, a5, a2
; ZVBB-NEXT: add a2, a6, a2
; ZVBB-NEXT: vle16.v v10, (a6)
; ZVBB-NEXT: vle16.v v8, (a2)
; ZVBB-NEXT: srli a1, a1, 2
; ZVBB-NEXT: vle16.v v11, (a5)
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v10, v8, a1
; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vle16.v v9, (a4)
; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v9, v11, a1
; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
; ZVBB-NEXT: vle16.v v11, (a3)
; ZVBB-NEXT: vle16.v v8, (a0)
; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v8, v11, a1
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 12 x bfloat> @llvm.vector.interleave6.nxv12bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5)
  ret <vscale x 12 x bfloat> %res
}

define <vscale x 24 x bfloat> @vector_interleave_nxv24bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv24bf16_nxv4bf16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma
; CHECK-NEXT: vsseg6e16.v v8, (a0)
; CHECK-NEXT: vl1re16.v v10, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re16.v v11, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re16.v v8, (a0)
; CHECK-NEXT: vl1re16.v v9, (a2)
; CHECK-NEXT: vl1re16.v v12, (a3)
; CHECK-NEXT: add a1, a3, a1
; CHECK-NEXT: vl1re16.v v13, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv24bf16_nxv4bf16:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
; ZVBB-NEXT: add a3, a2, a1
; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma
; ZVBB-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-NEXT: vl1re16.v v10, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re16.v v11, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re16.v v8, (a0)
; ZVBB-NEXT: vl1re16.v v9, (a2)
; ZVBB-NEXT: vl1re16.v v12, (a3)
; ZVBB-NEXT: add a1, a3, a1
; ZVBB-NEXT: vl1re16.v v13, (a1)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 24 x bfloat> @llvm.vector.interleave6.nxv24bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5)
  ret <vscale x 24 x bfloat> %res
}

define <vscale x 48 x bfloat> @vector_interleave_nxv48bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5) nounwind {
; RV32-LABEL: vector_interleave_nxv48bf16_nxv8bf16:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV32-NEXT: vmv2r.v v20, v14
; RV32-NEXT: vmv2r.v v22, v12
; RV32-NEXT: vmv2r.v v24, v10
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a0, 6
; RV32-NEXT: mul a1, a1, a0
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 64
; RV32-NEXT: vmv1r.v v10, v25
; RV32-NEXT: vmv1r.v v11, v23
; RV32-NEXT: vmv1r.v v12, v21
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv1r.v v13, v17
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: vmv1r.v v14, v19
; RV32-NEXT: vsseg6e16.v v9, (a1)
; RV32-NEXT: vmv1r.v v9, v24
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vmv1r.v v10, v22
; RV32-NEXT: add a3, a0, a2
; RV32-NEXT: vmv1r.v v11, v20
; RV32-NEXT: add a4, a3, a2
; RV32-NEXT: vmv1r.v v12, v16
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vmv1r.v v13, v18
; RV32-NEXT: vsseg6e16.v v8, (a0)
; RV32-NEXT: vl1re16.v v14, (a1)
; RV32-NEXT: add a1, a6, a2
; RV32-NEXT: vl1re16.v v15, (a5)
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vl1re16.v v18, (a5)
; RV32-NEXT: add a5, a5, a2
; RV32-NEXT: vl1re16.v v19, (a5)
; RV32-NEXT: add a5, a4, a2
; RV32-NEXT: vl1re16.v v16, (a6)
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vl1re16.v v12, (a6)
; RV32-NEXT: add a6, a6, a2
; RV32-NEXT: vl1re16.v v13, (a6)
; RV32-NEXT: csrr a6, vlenb
; RV32-NEXT: li a7, 12
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 64
; RV32-NEXT: vl1re16.v v17, (a1)
; RV32-NEXT: vl1re16.v v10, (a4)
; RV32-NEXT: vl1re16.v v11, (a5)
; RV32-NEXT: vl1re16.v v8, (a0)
; RV32-NEXT: vl1re16.v v9, (a3)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a6, a2
; RV32-NEXT: vs4r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a6)
; RV32-NEXT: vl8re16.v v16, (a2)
; RV32-NEXT: vl8re16.v v8, (a6)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv48bf16_nxv8bf16:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 80
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vmv2r.v v20, v14
; RV64-NEXT: vmv2r.v v22, v12
; RV64-NEXT: vmv2r.v v24, v10
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a0, 6
; RV64-NEXT: mul a1, a1, a0
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 64
; RV64-NEXT: vmv1r.v v10, v25
; RV64-NEXT: vmv1r.v v11, v23
; RV64-NEXT: vmv1r.v v12, v21
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv1r.v v13, v17
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: vmv1r.v v14, v19
; RV64-NEXT: vsseg6e16.v v9, (a1)
; RV64-NEXT: vmv1r.v v9, v24
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vmv1r.v v10, v22
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: vmv1r.v v11, v20
; RV64-NEXT: add a4, a3, a2
; RV64-NEXT: vmv1r.v v12, v16
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vmv1r.v v13, v18
; RV64-NEXT: vsseg6e16.v v8, (a0)
; RV64-NEXT: vl1re16.v v14, (a1)
; RV64-NEXT: add a1, a6, a2
; RV64-NEXT: vl1re16.v v15, (a5)
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vl1re16.v v18, (a5)
; RV64-NEXT: add a5, a5, a2
; RV64-NEXT: vl1re16.v v19, (a5)
; RV64-NEXT: add a5, a4, a2
; RV64-NEXT: vl1re16.v v16, (a6)
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vl1re16.v v12, (a6)
; RV64-NEXT: add a6, a6, a2
; RV64-NEXT: vl1re16.v v13, (a6)
; RV64-NEXT: csrr a6, vlenb
; RV64-NEXT: li a7, 12
; RV64-NEXT: mul a6, a6, a7
; RV64-NEXT: add a6, sp, a6
; RV64-NEXT: addi a6, a6, 64
; RV64-NEXT: vl1re16.v v17, (a1)
; RV64-NEXT: vl1re16.v v10, (a4)
; RV64-NEXT: vl1re16.v v11, (a5)
; RV64-NEXT: vl1re16.v v8, (a0)
; RV64-NEXT: vl1re16.v v9, (a3)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a6, a2
; RV64-NEXT: vs4r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a6)
; RV64-NEXT: vl8re16.v v16, (a2)
; RV64-NEXT: vl8re16.v v8, (a6)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv48bf16_nxv8bf16:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: addi s0, sp, 80
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v20, v14
; ZVBB-RV32-NEXT: vmv2r.v v22, v12
; ZVBB-RV32-NEXT: vmv2r.v v24, v10
; ZVBB-RV32-NEXT: csrr a1, vlenb
; ZVBB-RV32-NEXT: li a0, 6
; ZVBB-RV32-NEXT: mul a1, a1, a0
; ZVBB-RV32-NEXT: add a1, sp, a1
; ZVBB-RV32-NEXT: addi a1, a1, 64
; ZVBB-RV32-NEXT: vmv1r.v v10, v25
; ZVBB-RV32-NEXT: vmv1r.v v11, v23
; ZVBB-RV32-NEXT: vmv1r.v v12, v21
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv1r.v v13, v17
; ZVBB-RV32-NEXT: csrr a2, vlenb
; ZVBB-RV32-NEXT: vmv1r.v v14, v19
; ZVBB-RV32-NEXT: vsseg6e16.v v9, (a1)
; ZVBB-RV32-NEXT: vmv1r.v v9, v24
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vmv1r.v v10, v22
; ZVBB-RV32-NEXT: add a3, a0, a2
; ZVBB-RV32-NEXT: vmv1r.v v11, v20
; ZVBB-RV32-NEXT: add a4, a3, a2
; ZVBB-RV32-NEXT: vmv1r.v v12, v16
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vmv1r.v v13, v18
; ZVBB-RV32-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re16.v v14, (a1)
; ZVBB-RV32-NEXT: add a1, a6, a2
; ZVBB-RV32-NEXT: vl1re16.v v15, (a5)
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vl1re16.v v18, (a5)
; ZVBB-RV32-NEXT: add a5, a5, a2
; ZVBB-RV32-NEXT: vl1re16.v v19, (a5)
; ZVBB-RV32-NEXT: add a5, a4, a2
; ZVBB-RV32-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
; ZVBB-RV32-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV32-NEXT: csrr a6, vlenb
; ZVBB-RV32-NEXT: li a7, 12
; ZVBB-RV32-NEXT: mul a6, a6, a7
; ZVBB-RV32-NEXT: add a6, sp, a6
; ZVBB-RV32-NEXT: addi a6, a6, 64
; ZVBB-RV32-NEXT: vl1re16.v v17, (a1)
; ZVBB-RV32-NEXT: vl1re16.v v10, (a4)
; ZVBB-RV32-NEXT: vl1re16.v v11, (a5)
; ZVBB-RV32-NEXT: vl1re16.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re16.v v9, (a3)
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a6, a2
; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a6)
; ZVBB-RV32-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re16.v v8, (a6)
; ZVBB-RV32-NEXT: addi sp, s0, -80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: addi sp, sp, 80
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv48bf16_nxv8bf16:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: addi s0, sp, 80
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v20, v14
; ZVBB-RV64-NEXT: vmv2r.v v22, v12
; ZVBB-RV64-NEXT: vmv2r.v v24, v10
; ZVBB-RV64-NEXT: csrr a1, vlenb
; ZVBB-RV64-NEXT: li a0, 6
; ZVBB-RV64-NEXT: mul a1, a1, a0
; ZVBB-RV64-NEXT: add a1, sp, a1
; ZVBB-RV64-NEXT: addi a1, a1, 64
; ZVBB-RV64-NEXT: vmv1r.v v10, v25
; ZVBB-RV64-NEXT: vmv1r.v v11, v23
; ZVBB-RV64-NEXT: vmv1r.v v12, v21
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv1r.v v13, v17
; ZVBB-RV64-NEXT: csrr a2, vlenb
; ZVBB-RV64-NEXT: vmv1r.v v14, v19
; ZVBB-RV64-NEXT: vsseg6e16.v v9, (a1)
; ZVBB-RV64-NEXT: vmv1r.v v9, v24
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vmv1r.v v10, v22
; ZVBB-RV64-NEXT: add a3, a0, a2
; ZVBB-RV64-NEXT: vmv1r.v v11, v20
; ZVBB-RV64-NEXT: add a4, a3, a2
; ZVBB-RV64-NEXT: vmv1r.v v12, v16
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vmv1r.v v13, v18
; ZVBB-RV64-NEXT: vsseg6e16.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re16.v v14, (a1)
; ZVBB-RV64-NEXT: add a1, a6, a2
; ZVBB-RV64-NEXT: vl1re16.v v15, (a5)
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vl1re16.v v18, (a5)
; ZVBB-RV64-NEXT: add a5, a5, a2
; ZVBB-RV64-NEXT: vl1re16.v v19, (a5)
; ZVBB-RV64-NEXT: add a5, a4, a2
; ZVBB-RV64-NEXT: vl1re16.v v16, (a6)
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vl1re16.v v12, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
; ZVBB-RV64-NEXT: vl1re16.v v13, (a6)
; ZVBB-RV64-NEXT: csrr a6, vlenb
; ZVBB-RV64-NEXT: li a7, 12
; ZVBB-RV64-NEXT: mul a6, a6, a7
; ZVBB-RV64-NEXT: add a6, sp, a6
; ZVBB-RV64-NEXT: addi a6, a6, 64
; ZVBB-RV64-NEXT: vl1re16.v v17, (a1)
; ZVBB-RV64-NEXT: vl1re16.v v10, (a4)
; ZVBB-RV64-NEXT: vl1re16.v v11, (a5)
; ZVBB-RV64-NEXT: vl1re16.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re16.v v9, (a3)
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a6, a2
; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a6)
; ZVBB-RV64-NEXT: vl8re16.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re16.v v8, (a6)
; ZVBB-RV64-NEXT: addi sp, s0, -80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv48bf16_nxv8bf16:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT: addi s0, sp, 80
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v14
; ZIP-NEXT: vmv2r.v v22, v12
; ZIP-NEXT: vmv2r.v v24, v10
; ZIP-NEXT: csrr a1, vlenb
; ZIP-NEXT: li a0, 6
; ZIP-NEXT: mul a1, a1, a0
; ZIP-NEXT: add a1, sp, a1
; ZIP-NEXT: addi a1, a1, 64
; ZIP-NEXT: vmv1r.v v10, v25
; ZIP-NEXT: vmv1r.v v11, v23
; ZIP-NEXT: vmv1r.v v12, v21
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv1r.v v13, v17
; ZIP-NEXT: csrr a2, vlenb
; ZIP-NEXT: vmv1r.v v14, v19
; ZIP-NEXT: vsseg6e16.v v9, (a1)
; ZIP-NEXT: vmv1r.v v9, v24
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vmv1r.v v10, v22
; ZIP-NEXT: add a3, a0, a2
; ZIP-NEXT: vmv1r.v v11, v20
; ZIP-NEXT: add a4, a3, a2
; ZIP-NEXT: vmv1r.v v12, v16
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vmv1r.v v13, v18
; ZIP-NEXT: vsseg6e16.v v8, (a0)
; ZIP-NEXT: vl1re16.v v14, (a1)
; ZIP-NEXT: add a1, a6, a2
; ZIP-NEXT: vl1re16.v v15, (a5)
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vl1re16.v v18, (a5)
; ZIP-NEXT: add a5, a5, a2
; ZIP-NEXT: vl1re16.v v19, (a5)
; ZIP-NEXT: add a5, a4, a2
; ZIP-NEXT: vl1re16.v v16, (a6)
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vl1re16.v v12, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re16.v v13, (a6)
; ZIP-NEXT: csrr a6, vlenb
; ZIP-NEXT: li a7, 12
; ZIP-NEXT: mul a6, a6, a7
; ZIP-NEXT: add a6, sp, a6
; ZIP-NEXT: addi a6, a6, 64
; ZIP-NEXT: vl1re16.v v17, (a1)
; ZIP-NEXT: vl1re16.v v10, (a4)
; ZIP-NEXT: vl1re16.v v11, (a5)
; ZIP-NEXT: vl1re16.v v8, (a0)
; ZIP-NEXT: vl1re16.v v9, (a3)
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a6, a2
; ZIP-NEXT: vs4r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a6)
; ZIP-NEXT: vl8re16.v v16, (a2)
; ZIP-NEXT: vl8re16.v v8, (a6)
; ZIP-NEXT: addi sp, s0, -80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZIP-NEXT: addi sp, sp, 80
; ZIP-NEXT: ret
  %res = call <vscale x 48 x bfloat> @llvm.vector.interleave6.nxv48bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5)
  ret <vscale x 48 x bfloat> %res
}

; Same mf2 scheme at e32; the vslideup offset becomes vlenb/8 elements
; (srli a1, a1, 3). Note the sixth operand is named %v6 rather than %v5.
define <vscale x 6 x float> @vector_interleave_nxv6f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v6) nounwind {
; CHECK-LABEL: vector_interleave_nxv6f32_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: add a3, a0, a2
; CHECK-NEXT: add a4, a3, a2
; CHECK-NEXT: add a5, a4, a2
; CHECK-NEXT: vsetvli a6, zero, e32, mf2, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: add a6, a5, a2
; CHECK-NEXT: add a2, a6, a2
; CHECK-NEXT: vle32.v v10, (a6)
; CHECK-NEXT: vle32.v v8, (a2)
; CHECK-NEXT: srli a1, a1, 3
; CHECK-NEXT: vle32.v v11, (a5)
; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v10, v8, a1
; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v9, (a4)
; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v9, v11, a1
; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v11, (a3)
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v11, a1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6f32_nxv1f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: srli a2, a1, 1
; ZVBB-NEXT: add a3, a0, a2
; ZVBB-NEXT: add a4, a3, a2
; ZVBB-NEXT: add a5, a4, a2
; ZVBB-NEXT: vsetvli a6, zero, e32, mf2, ta, ma
; ZVBB-NEXT: vsseg6e32.v v8, (a0)
; ZVBB-NEXT: add a6, a5, a2
; ZVBB-NEXT: add a2, a6, a2
; ZVBB-NEXT: vle32.v v10, (a6)
; ZVBB-NEXT: vle32.v v8, (a2)
; ZVBB-NEXT: srli a1, a1, 3
; ZVBB-NEXT: vle32.v v11, (a5)
; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v10, v8, a1
; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
; ZVBB-NEXT: vle32.v v9, (a4)
; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v9, v11, a1
; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
; ZVBB-NEXT: vle32.v v11, (a3)
; ZVBB-NEXT: vle32.v v8, (a0)
; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; ZVBB-NEXT: vslideup.vx v8, v11, a1
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a1, a0, 1
; ZVBB-NEXT: add a0, a1, a0
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 6 x float> @llvm.vector.interleave6.nxv6f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v6)
  ret <vscale x 6 x float> %res
}

define <vscale x 12 x float> @vector_interleave_nxv12f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv12f32_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma
; CHECK-NEXT: vsseg6e32.v v8, (a0)
; CHECK-NEXT: vl1re32.v v10, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re32.v v11, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re32.v v8, (a0)
; CHECK-NEXT: vl1re32.v v9, (a2)
; CHECK-NEXT: vl1re32.v v12, (a3)
; CHECK-NEXT: add a1, a3, a1
; CHECK-NEXT: vl1re32.v v13, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv12f32_nxv2f32:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
; ZVBB-NEXT: add a3, a2, a1
; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma
; ZVBB-NEXT: vsseg6e32.v v8, (a0)
; ZVBB-NEXT: vl1re32.v v10, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re32.v v11, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re32.v v8, (a0)
; ZVBB-NEXT: vl1re32.v v9, (a2)
; ZVBB-NEXT: vl1re32.v v12, (a3)
; ZVBB-NEXT: add a1, a3, a1
; ZVBB-NEXT: vl1re32.v v13, (a1)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 12 x float> @llvm.vector.interleave6.nxv12f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5)
  ret <vscale x 12 x float> %res
}

define <vscale x 24 x float> @vector_interleave_nxv24f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5) nounwind {
; RV32-LABEL: vector_interleave_nxv24f32_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vmv2r.v v20, v14
; RV32-NEXT: vmv2r.v v22, v12
; RV32-NEXT: vmv2r.v v24, v10
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a0, 6
; RV32-NEXT: mul a1, a1, a0
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 64
; RV32-NEXT: vmv1r.v v10, v25
; RV32-NEXT: vmv1r.v v11, v23
; RV32-NEXT: vmv1r.v v12, v21
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv1r.v v13, v17
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: vmv1r.v v14, v19
; RV32-NEXT: vsseg6e32.v v9, (a1)
; RV32-NEXT: vmv1r.v v9, v24
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vmv1r.v v10, v22
; RV32-NEXT: add a3, a0, a2
; RV32-NEXT: vmv1r.v v11, v20
; RV32-NEXT: add a4, a3, a2
; RV32-NEXT: vmv1r.v v12, v16
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vmv1r.v v13, v18
; RV32-NEXT: vsseg6e32.v v8, (a0)
; RV32-NEXT: vl1re32.v v14, (a1)
; RV32-NEXT: add a1, a6, a2
; RV32-NEXT: vl1re32.v v15, (a5)
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vl1re32.v v18, (a5)
; RV32-NEXT: add a5, a5, a2
; RV32-NEXT: vl1re32.v v19, (a5)
; RV32-NEXT: add a5, a4, a2
; RV32-NEXT: vl1re32.v v16, (a6)
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vl1re32.v v12, (a6)
; RV32-NEXT: add a6, a6, a2
; RV32-NEXT: vl1re32.v v13, (a6)
; RV32-NEXT: csrr a6, vlenb
; RV32-NEXT: li a7, 12
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 64
; RV32-NEXT: vl1re32.v v17, (a1)
; RV32-NEXT: vl1re32.v v10, (a4)
; RV32-NEXT: vl1re32.v v11, (a5)
; RV32-NEXT: vl1re32.v v8, (a0)
; RV32-NEXT: vl1re32.v v9, (a3)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a6, a2
; RV32-NEXT: vs4r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a6)
; RV32-NEXT: vl8re32.v v16, (a2)
; RV32-NEXT: vl8re32.v v8, (a6)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv24f32_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 80
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vmv2r.v v20, v14
; RV64-NEXT: vmv2r.v v22, v12
; RV64-NEXT: vmv2r.v v24, v10
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a0, 6
; RV64-NEXT: mul a1, a1, a0
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 64
; RV64-NEXT: vmv1r.v v10, v25
; RV64-NEXT: vmv1r.v v11, v23
; RV64-NEXT: vmv1r.v v12, v21
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv1r.v v13, v17
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: vmv1r.v v14, v19
; RV64-NEXT: vsseg6e32.v v9, (a1)
; RV64-NEXT: vmv1r.v v9, v24
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vmv1r.v v10, v22
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: vmv1r.v v11, v20
; RV64-NEXT: add a4, a3, a2
; RV64-NEXT: vmv1r.v v12, v16
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vmv1r.v v13, v18
; RV64-NEXT: vsseg6e32.v v8, (a0)
; RV64-NEXT: vl1re32.v v14, (a1)
; RV64-NEXT: add a1, a6, a2
; RV64-NEXT: vl1re32.v v15, (a5)
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vl1re32.v v18, (a5)
; RV64-NEXT: add a5, a5, a2
; RV64-NEXT: vl1re32.v v19, (a5)
; RV64-NEXT: add a5, a4, a2
; RV64-NEXT: vl1re32.v v16, (a6)
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vl1re32.v v12, (a6)
; RV64-NEXT: add a6, a6, a2
; RV64-NEXT: vl1re32.v v13, (a6)
; RV64-NEXT: csrr a6, vlenb
; RV64-NEXT: li a7, 12
; RV64-NEXT: mul a6, a6, a7
; RV64-NEXT: add a6, sp, a6
; RV64-NEXT: addi a6, a6, 64
; RV64-NEXT: vl1re32.v v17, (a1)
; RV64-NEXT: vl1re32.v v10, (a4)
; RV64-NEXT: vl1re32.v v11, (a5)
; RV64-NEXT: vl1re32.v v8, (a0)
; RV64-NEXT: vl1re32.v v9, (a3)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a6, a2
; RV64-NEXT: vs4r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a6)
; RV64-NEXT: vl8re32.v v16, (a2)
; RV64-NEXT: vl8re32.v v8, (a6)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv24f32_nxv4f32:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: addi s0, sp, 80
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v20, v14
; ZVBB-RV32-NEXT: vmv2r.v v22, v12
; ZVBB-RV32-NEXT: vmv2r.v v24, v10
; ZVBB-RV32-NEXT: csrr a1, vlenb
; ZVBB-RV32-NEXT: li a0, 6
; ZVBB-RV32-NEXT: mul a1, a1, a0
; ZVBB-RV32-NEXT: add a1, sp, a1
; ZVBB-RV32-NEXT: addi a1, a1, 64
; ZVBB-RV32-NEXT: vmv1r.v v10, v25
; ZVBB-RV32-NEXT: vmv1r.v v11, v23
; ZVBB-RV32-NEXT: vmv1r.v v12, v21
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv1r.v v13, v17
; ZVBB-RV32-NEXT: csrr a2, vlenb
; ZVBB-RV32-NEXT: vmv1r.v v14, v19
; ZVBB-RV32-NEXT: vsseg6e32.v v9, (a1)
; ZVBB-RV32-NEXT: vmv1r.v v9, v24
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vmv1r.v v10, v22
; ZVBB-RV32-NEXT: add a3, a0, a2
; ZVBB-RV32-NEXT: vmv1r.v v11, v20
; ZVBB-RV32-NEXT: add a4, a3, a2
; ZVBB-RV32-NEXT: vmv1r.v v12, v16
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vmv1r.v v13, v18
; ZVBB-RV32-NEXT: vsseg6e32.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re32.v v14, (a1)
; ZVBB-RV32-NEXT: add a1, a6, a2
; ZVBB-RV32-NEXT: vl1re32.v v15, (a5)
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vl1re32.v v18, (a5)
; ZVBB-RV32-NEXT: add a5, a5, a2
; ZVBB-RV32-NEXT: vl1re32.v v19, (a5)
; ZVBB-RV32-NEXT: add a5, a4, a2
; ZVBB-RV32-NEXT: vl1re32.v v16, (a6)
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vl1re32.v v12, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
; ZVBB-RV32-NEXT: vl1re32.v v13, (a6)
; ZVBB-RV32-NEXT: csrr a6, vlenb
; ZVBB-RV32-NEXT: li a7, 12
; ZVBB-RV32-NEXT: mul a6, a6, a7
; ZVBB-RV32-NEXT: add a6, sp, a6
; ZVBB-RV32-NEXT: addi a6, a6, 64
; ZVBB-RV32-NEXT: vl1re32.v v17, (a1)
; ZVBB-RV32-NEXT: vl1re32.v v10, (a4)
; ZVBB-RV32-NEXT: vl1re32.v v11, (a5)
; ZVBB-RV32-NEXT: vl1re32.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re32.v v9, (a3)
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a6, a2
; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a6)
; ZVBB-RV32-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re32.v v8, (a6)
; ZVBB-RV32-NEXT: addi sp, s0, -80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: addi sp, sp, 80
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv24f32_nxv4f32:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: addi s0, sp, 80
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v20, v14
; ZVBB-RV64-NEXT: vmv2r.v v22, v12
; ZVBB-RV64-NEXT: vmv2r.v v24, v10
; ZVBB-RV64-NEXT: csrr a1, vlenb
; ZVBB-RV64-NEXT: li a0, 6
; ZVBB-RV64-NEXT: mul a1, a1, a0
; ZVBB-RV64-NEXT: add a1, sp, a1
; ZVBB-RV64-NEXT: addi a1, a1, 64
; ZVBB-RV64-NEXT: vmv1r.v v10, v25
; ZVBB-RV64-NEXT: vmv1r.v v11, v23
; ZVBB-RV64-NEXT: vmv1r.v v12, v21
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv1r.v v13, v17
; ZVBB-RV64-NEXT: csrr a2, vlenb
; ZVBB-RV64-NEXT: vmv1r.v v14, v19
; ZVBB-RV64-NEXT: vsseg6e32.v v9, (a1)
; ZVBB-RV64-NEXT: vmv1r.v v9, v24
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vmv1r.v v10, v22
; ZVBB-RV64-NEXT: add a3, a0, a2
; ZVBB-RV64-NEXT: vmv1r.v v11, v20
; ZVBB-RV64-NEXT: add a4, a3, a2
; ZVBB-RV64-NEXT: vmv1r.v v12, v16
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vmv1r.v v13, v18
; ZVBB-RV64-NEXT: vsseg6e32.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re32.v v14, (a1)
; ZVBB-RV64-NEXT: add a1, a6, a2
; ZVBB-RV64-NEXT: vl1re32.v v15, (a5)
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vl1re32.v v18, (a5)
; ZVBB-RV64-NEXT: add a5, a5, a2
; ZVBB-RV64-NEXT: vl1re32.v v19, (a5)
; ZVBB-RV64-NEXT: add a5, a4, a2
; ZVBB-RV64-NEXT: vl1re32.v v16, (a6)
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vl1re32.v v12, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
; ZVBB-RV64-NEXT: vl1re32.v v13, (a6)
; ZVBB-RV64-NEXT: csrr a6, vlenb
; ZVBB-RV64-NEXT: li a7, 12
; ZVBB-RV64-NEXT: mul a6, a6, a7
; ZVBB-RV64-NEXT: add a6, sp, a6
; ZVBB-RV64-NEXT: addi a6, a6, 64
; ZVBB-RV64-NEXT: vl1re32.v v17, (a1)
; ZVBB-RV64-NEXT: vl1re32.v v10, (a4)
; ZVBB-RV64-NEXT: vl1re32.v v11, (a5)
; ZVBB-RV64-NEXT: vl1re32.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re32.v v9, (a3)
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a6, a2
; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a6)
; ZVBB-RV64-NEXT: vl8re32.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re32.v v8, (a6)
; ZVBB-RV64-NEXT: addi sp, s0, -80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv24f32_nxv4f32:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT: addi s0, sp, 80
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v14
; ZIP-NEXT: vmv2r.v v22, v12
; ZIP-NEXT: vmv2r.v v24, v10
; ZIP-NEXT: csrr a1, vlenb
; ZIP-NEXT: li a0, 6
; ZIP-NEXT: mul a1, a1, a0
; ZIP-NEXT: add a1, sp, a1
; ZIP-NEXT: addi a1, a1, 64
; ZIP-NEXT: vmv1r.v v10, v25
; ZIP-NEXT: vmv1r.v v11, v23
; ZIP-NEXT: vmv1r.v v12, v21
; ZIP-NEXT: addi a0, sp, 64
; ZIP-NEXT: vmv1r.v v13, v17
; ZIP-NEXT: csrr a2, vlenb
; ZIP-NEXT: vmv1r.v v14, v19
; ZIP-NEXT: vsseg6e32.v v9, (a1)
; ZIP-NEXT: vmv1r.v v9, v24
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vmv1r.v v10, v22
; ZIP-NEXT: add a3, a0, a2
; ZIP-NEXT: vmv1r.v v11, v20
; ZIP-NEXT: add a4, a3, a2
; ZIP-NEXT: vmv1r.v v12, v16
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vmv1r.v v13, v18
; ZIP-NEXT: vsseg6e32.v v8, (a0)
; ZIP-NEXT: vl1re32.v v14, (a1)
; ZIP-NEXT: add a1, a6, a2
; ZIP-NEXT: vl1re32.v v15, (a5)
; ZIP-NEXT: add a5, a1, a2
; ZIP-NEXT: vl1re32.v v18, (a5)
; ZIP-NEXT: add a5, a5, a2
; ZIP-NEXT: vl1re32.v v19, (a5)
; ZIP-NEXT: add a5, a4, a2
; ZIP-NEXT: vl1re32.v v16, (a6)
; ZIP-NEXT: add a6, a5, a2
; ZIP-NEXT: vl1re32.v v12, (a6)
; ZIP-NEXT: add a6, a6, a2
; ZIP-NEXT: vl1re32.v v13, (a6)
; ZIP-NEXT: csrr a6, vlenb
; ZIP-NEXT: li a7, 12
; ZIP-NEXT: mul a6, a6, a7
; ZIP-NEXT: add a6, sp, a6
; ZIP-NEXT: addi a6, a6, 64
; ZIP-NEXT: vl1re32.v v17, (a1)
; ZIP-NEXT: vl1re32.v v10, (a4)
; ZIP-NEXT: vl1re32.v v11, (a5)
; ZIP-NEXT: vl1re32.v v8, (a0)
; ZIP-NEXT: vl1re32.v v9, (a3)
; ZIP-NEXT: slli a2, a2, 3
; ZIP-NEXT: add a2, a6, a2
; ZIP-NEXT: vs4r.v v16, (a2)
; ZIP-NEXT: vs8r.v v8, (a6)
; ZIP-NEXT: vl8re32.v v16, (a2)
; ZIP-NEXT: vl8re32.v v8, (a6)
; ZIP-NEXT: addi sp, s0, -80
; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZIP-NEXT: addi sp, sp, 80
; ZIP-NEXT: ret
  %res = call <vscale x 24 x float> @llvm.vector.interleave6.nxv24f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5)
  ret <vscale x 24 x float> %res
}

; The f64 cases reuse the same m1 and m2 schemes at e64.
define <vscale x 6 x double> @vector_interleave_nxv6f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5) nounwind {
; CHECK-LABEL: vector_interleave_nxv6f64_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: add a2, a0, a1
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma
; CHECK-NEXT: vsseg6e64.v v8, (a0)
; CHECK-NEXT: vl1re64.v v10, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re64.v v11, (a3)
; CHECK-NEXT: add a3, a3, a1
; CHECK-NEXT: vl1re64.v v8, (a0)
; CHECK-NEXT: vl1re64.v v9, (a2)
; CHECK-NEXT: vl1re64.v v12, (a3)
; CHECK-NEXT: add a1, a3, a1
; CHECK-NEXT: vl1re64.v v13, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 6
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
;
; ZVBB-LABEL: vector_interleave_nxv6f64_nxv1f64:
; ZVBB: # %bb.0:
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: sub sp, sp, a0
; ZVBB-NEXT: addi a0, sp, 16
; ZVBB-NEXT: csrr a1, vlenb
; ZVBB-NEXT: add a2, a0, a1
; ZVBB-NEXT: add a3, a2, a1
; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma
; ZVBB-NEXT: vsseg6e64.v v8, (a0)
; ZVBB-NEXT: vl1re64.v v10, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re64.v v11, (a3)
; ZVBB-NEXT: add a3, a3, a1
; ZVBB-NEXT: vl1re64.v v8, (a0)
; ZVBB-NEXT: vl1re64.v v9, (a2)
; ZVBB-NEXT: vl1re64.v v12, (a3)
; ZVBB-NEXT: add a1, a3, a1
; ZVBB-NEXT: vl1re64.v v13, (a1)
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: li a1, 6
; ZVBB-NEXT: mul a0, a0, a1
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
  %res = call <vscale x 6 x double> @llvm.vector.interleave6.nxv6f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5)
  ret <vscale x 6 x double> %res
}

define <vscale x 12 x double> @vector_interleave_nxv12f64_nxv2f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5) nounwind {
; RV32-LABEL: vector_interleave_nxv12f64_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 28
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: andi sp, sp, -64
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT: vmv2r.v v20, v14
; RV32-NEXT: vmv2r.v v22, v12
; RV32-NEXT: vmv2r.v v24, v10
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a0, 6
; RV32-NEXT: mul a1, a1, a0
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 64
; RV32-NEXT: vmv1r.v v10, v25
; RV32-NEXT: vmv1r.v v11, v23
; RV32-NEXT: vmv1r.v v12, v21
; RV32-NEXT: addi a0, sp, 64
; RV32-NEXT: vmv1r.v v13, v17
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: vmv1r.v v14, v19
; RV32-NEXT: vsseg6e64.v v9, (a1)
; RV32-NEXT: vmv1r.v v9, v24
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vmv1r.v v10, v22
; RV32-NEXT: add a3, a0, a2
; RV32-NEXT: vmv1r.v v11, v20
; RV32-NEXT: add a4, a3, a2
; RV32-NEXT: vmv1r.v v12, v16
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vmv1r.v v13, v18
; RV32-NEXT: vsseg6e64.v v8, (a0)
; RV32-NEXT: vl1re64.v v14, (a1)
; RV32-NEXT: add a1, a6, a2
; RV32-NEXT: vl1re64.v v15, (a5)
; RV32-NEXT: add a5, a1, a2
; RV32-NEXT: vl1re64.v v18, (a5)
; RV32-NEXT: add a5, a5, a2
; RV32-NEXT: vl1re64.v v19, (a5)
; RV32-NEXT: add a5, a4, a2
; RV32-NEXT: vl1re64.v v16, (a6)
; RV32-NEXT: add a6, a5, a2
; RV32-NEXT: vl1re64.v v12, (a6)
; RV32-NEXT: add a6, a6, a2
; RV32-NEXT: vl1re64.v v13, (a6)
; RV32-NEXT: csrr a6, vlenb
; RV32-NEXT: li a7, 12
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 64
; RV32-NEXT: vl1re64.v v17, (a1)
; RV32-NEXT: vl1re64.v v10, (a4)
; RV32-NEXT: vl1re64.v v11, (a5)
; RV32-NEXT: vl1re64.v v8, (a0)
; RV32-NEXT: vl1re64.v v9, (a3)
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, a6, a2
; RV32-NEXT: vs4r.v v16, (a2)
; RV32-NEXT: vs8r.v v8, (a6)
; RV32-NEXT: vl8re64.v v16, (a2)
; RV32-NEXT: vl8re64.v v8, (a6)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: ret
;
; RV64-LABEL: vector_interleave_nxv12f64_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -80
; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 80
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 28
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: andi sp, sp, -64
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vmv2r.v v20, v14
; RV64-NEXT: vmv2r.v v22, v12
; RV64-NEXT: vmv2r.v v24, v10
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a0, 6
; RV64-NEXT: mul a1, a1, a0
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 64
; RV64-NEXT: vmv1r.v v10, v25
; RV64-NEXT: vmv1r.v v11, v23
; RV64-NEXT: vmv1r.v v12, v21
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: vmv1r.v v13, v17
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: vmv1r.v v14, v19
; RV64-NEXT: vsseg6e64.v v9, (a1)
; RV64-NEXT: vmv1r.v v9, v24
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vmv1r.v v10, v22
; RV64-NEXT: add a3, a0, a2
; RV64-NEXT: vmv1r.v v11, v20
; RV64-NEXT: add a4, a3, a2
; RV64-NEXT: vmv1r.v v12, v16
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vmv1r.v v13, v18
; RV64-NEXT: vsseg6e64.v v8, (a0)
; RV64-NEXT: vl1re64.v v14, (a1)
; RV64-NEXT: add a1, a6, a2
; RV64-NEXT: vl1re64.v v15, (a5)
; RV64-NEXT: add a5, a1, a2
; RV64-NEXT: vl1re64.v v18, (a5)
; RV64-NEXT: add a5, a5, a2
; RV64-NEXT: vl1re64.v v19, (a5)
; RV64-NEXT: add a5, a4, a2
; RV64-NEXT: vl1re64.v v16, (a6)
; RV64-NEXT: add a6, a5, a2
; RV64-NEXT: vl1re64.v v12, (a6)
; RV64-NEXT: add a6, a6, a2
; RV64-NEXT: vl1re64.v v13, (a6)
; RV64-NEXT: csrr a6, vlenb
; RV64-NEXT: li a7, 12
; RV64-NEXT: mul a6, a6, a7
; RV64-NEXT: add a6, sp, a6
; RV64-NEXT: addi a6, a6, 64
; RV64-NEXT: vl1re64.v v17, (a1)
; RV64-NEXT: vl1re64.v v10, (a4)
; RV64-NEXT: vl1re64.v v11, (a5)
; RV64-NEXT: vl1re64.v v8, (a0)
; RV64-NEXT: vl1re64.v v9, (a3)
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, a6, a2
; RV64-NEXT: vs4r.v v16, (a2)
; RV64-NEXT: vs8r.v v8, (a6)
; RV64-NEXT: vl8re64.v v16, (a2)
; RV64-NEXT: vl8re64.v v8, (a6)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 80
; RV64-NEXT: ret
;
; ZVBB-RV32-LABEL: vector_interleave_nxv12f64_nxv2f64:
; ZVBB-RV32: # %bb.0:
; ZVBB-RV32-NEXT: addi sp, sp, -80
; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; ZVBB-RV32-NEXT: addi s0, sp, 80
; ZVBB-RV32-NEXT: csrr a0, vlenb
; ZVBB-RV32-NEXT: li a1, 28
; ZVBB-RV32-NEXT: mul a0, a0, a1
; ZVBB-RV32-NEXT: sub sp, sp, a0
; ZVBB-RV32-NEXT: andi sp, sp, -64
; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; ZVBB-RV32-NEXT: vmv2r.v v20, v14
; ZVBB-RV32-NEXT: vmv2r.v v22, v12
; ZVBB-RV32-NEXT: vmv2r.v v24, v10
; ZVBB-RV32-NEXT: csrr a1, vlenb
; ZVBB-RV32-NEXT: li a0, 6
; ZVBB-RV32-NEXT: mul a1, a1, a0
; ZVBB-RV32-NEXT: add a1, sp, a1
; ZVBB-RV32-NEXT: addi a1, a1, 64
; ZVBB-RV32-NEXT: vmv1r.v v10, v25
; ZVBB-RV32-NEXT: vmv1r.v v11, v23
; ZVBB-RV32-NEXT: vmv1r.v v12, v21
; ZVBB-RV32-NEXT: addi a0, sp, 64
; ZVBB-RV32-NEXT: vmv1r.v v13, v17
; ZVBB-RV32-NEXT: csrr a2, vlenb
; ZVBB-RV32-NEXT: vmv1r.v v14, v19
; ZVBB-RV32-NEXT: vsseg6e64.v v9, (a1)
; ZVBB-RV32-NEXT: vmv1r.v v9, v24
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vmv1r.v v10, v22
; ZVBB-RV32-NEXT: add a3, a0, a2
; ZVBB-RV32-NEXT: vmv1r.v v11, v20
; ZVBB-RV32-NEXT: add a4, a3, a2
; ZVBB-RV32-NEXT: vmv1r.v v12, v16
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vmv1r.v v13, v18
; ZVBB-RV32-NEXT: vsseg6e64.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re64.v v14, (a1)
; ZVBB-RV32-NEXT: add a1, a6, a2
; ZVBB-RV32-NEXT: vl1re64.v v15, (a5)
; ZVBB-RV32-NEXT: add a5, a1, a2
; ZVBB-RV32-NEXT: vl1re64.v v18, (a5)
; ZVBB-RV32-NEXT: add a5, a5, a2
; ZVBB-RV32-NEXT: vl1re64.v v19, (a5)
; ZVBB-RV32-NEXT: add a5, a4, a2
; ZVBB-RV32-NEXT: vl1re64.v v16, (a6)
; ZVBB-RV32-NEXT: add a6, a5, a2
; ZVBB-RV32-NEXT: vl1re64.v v12, (a6)
; ZVBB-RV32-NEXT: add a6, a6, a2
; ZVBB-RV32-NEXT: vl1re64.v v13, (a6)
; ZVBB-RV32-NEXT: csrr a6, vlenb
; ZVBB-RV32-NEXT: li a7, 12
; ZVBB-RV32-NEXT: mul a6, a6, a7
; ZVBB-RV32-NEXT: add a6, sp, a6
; ZVBB-RV32-NEXT: addi a6, a6, 64
; ZVBB-RV32-NEXT: vl1re64.v v17, (a1)
; ZVBB-RV32-NEXT: vl1re64.v v10, (a4)
; ZVBB-RV32-NEXT: vl1re64.v v11, (a5)
; ZVBB-RV32-NEXT: vl1re64.v v8, (a0)
; ZVBB-RV32-NEXT: vl1re64.v v9, (a3)
; ZVBB-RV32-NEXT: slli a2, a2, 3
; ZVBB-RV32-NEXT: add a2, a6, a2
; ZVBB-RV32-NEXT: vs4r.v v16, (a2)
; ZVBB-RV32-NEXT: vs8r.v v8, (a6)
; ZVBB-RV32-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV32-NEXT: vl8re64.v v8, (a6)
; ZVBB-RV32-NEXT: addi sp, s0, -80
; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; ZVBB-RV32-NEXT: addi sp, sp, 80
; ZVBB-RV32-NEXT: ret
;
; ZVBB-RV64-LABEL: vector_interleave_nxv12f64_nxv2f64:
; ZVBB-RV64: # %bb.0:
; ZVBB-RV64-NEXT: addi sp, sp, -80
; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZVBB-RV64-NEXT: addi s0, sp, 80
; ZVBB-RV64-NEXT: csrr a0, vlenb
; ZVBB-RV64-NEXT: li a1, 28
; ZVBB-RV64-NEXT: mul a0, a0, a1
; ZVBB-RV64-NEXT: sub sp, sp, a0
; ZVBB-RV64-NEXT: andi sp, sp, -64
; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; ZVBB-RV64-NEXT: vmv2r.v v20, v14
; ZVBB-RV64-NEXT: vmv2r.v v22, v12
; ZVBB-RV64-NEXT: vmv2r.v v24, v10
; ZVBB-RV64-NEXT: csrr a1, vlenb
; ZVBB-RV64-NEXT: li a0, 6
; ZVBB-RV64-NEXT: mul a1, a1, a0
; ZVBB-RV64-NEXT: add a1, sp, a1
; ZVBB-RV64-NEXT: addi a1, a1, 64
; ZVBB-RV64-NEXT: vmv1r.v v10, v25
; ZVBB-RV64-NEXT: vmv1r.v v11, v23
; ZVBB-RV64-NEXT: vmv1r.v v12, v21
; ZVBB-RV64-NEXT: addi a0, sp, 64
; ZVBB-RV64-NEXT: vmv1r.v v13, v17
; ZVBB-RV64-NEXT: csrr a2, vlenb
; ZVBB-RV64-NEXT: vmv1r.v v14, v19
; ZVBB-RV64-NEXT: vsseg6e64.v v9, (a1)
; ZVBB-RV64-NEXT: vmv1r.v v9, v24
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vmv1r.v v10, v22
; ZVBB-RV64-NEXT: add a3, a0, a2
; ZVBB-RV64-NEXT: vmv1r.v v11, v20
; ZVBB-RV64-NEXT: add a4, a3, a2
; ZVBB-RV64-NEXT: vmv1r.v v12, v16
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vmv1r.v v13, v18
; ZVBB-RV64-NEXT: vsseg6e64.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re64.v v14, (a1)
; ZVBB-RV64-NEXT: add a1, a6, a2
; ZVBB-RV64-NEXT: vl1re64.v v15, (a5)
; ZVBB-RV64-NEXT: add a5, a1, a2
; ZVBB-RV64-NEXT: vl1re64.v v18, (a5)
; ZVBB-RV64-NEXT: add a5, a5, a2
; ZVBB-RV64-NEXT: vl1re64.v v19, (a5)
; ZVBB-RV64-NEXT: add a5, a4, a2
; ZVBB-RV64-NEXT: vl1re64.v v16, (a6)
; ZVBB-RV64-NEXT: add a6, a5, a2
; ZVBB-RV64-NEXT: vl1re64.v v12, (a6)
; ZVBB-RV64-NEXT: add a6, a6, a2
; ZVBB-RV64-NEXT: vl1re64.v v13, (a6)
; ZVBB-RV64-NEXT: csrr a6, vlenb
; ZVBB-RV64-NEXT: li a7, 12
; ZVBB-RV64-NEXT: mul a6, a6, a7
; ZVBB-RV64-NEXT: add a6, sp, a6
; ZVBB-RV64-NEXT: addi a6, a6, 64
; ZVBB-RV64-NEXT: vl1re64.v v17, (a1)
; ZVBB-RV64-NEXT: vl1re64.v v10, (a4)
; ZVBB-RV64-NEXT: vl1re64.v v11, (a5)
; ZVBB-RV64-NEXT: vl1re64.v v8, (a0)
; ZVBB-RV64-NEXT: vl1re64.v v9, (a3)
; ZVBB-RV64-NEXT: slli a2, a2, 3
; ZVBB-RV64-NEXT: add a2, a6, a2
; ZVBB-RV64-NEXT: vs4r.v v16, (a2)
; ZVBB-RV64-NEXT: vs8r.v v8, (a6)
; ZVBB-RV64-NEXT: vl8re64.v v16, (a2)
; ZVBB-RV64-NEXT: vl8re64.v v8, (a6)
; ZVBB-RV64-NEXT: addi sp, s0, -80
; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; ZVBB-RV64-NEXT: addi sp, sp, 80
; ZVBB-RV64-NEXT: ret
;
; ZIP-LABEL: vector_interleave_nxv12f64_nxv2f64:
; ZIP: # %bb.0:
; ZIP-NEXT: addi sp, sp, -80
; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; ZIP-NEXT: addi s0, sp, 80
; ZIP-NEXT: csrr a0, vlenb
; ZIP-NEXT: li a1, 28
; ZIP-NEXT: mul a0, a0, a1
; ZIP-NEXT: sub sp, sp, a0
; ZIP-NEXT: andi sp, sp, -64
; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; ZIP-NEXT: vmv2r.v v20, v14
; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: vmv2r.v v24, v10 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: li a0, 6 ; ZIP-NEXT: mul a1, a1, a0 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv1r.v v10, v25 ; ZIP-NEXT: vmv1r.v v11, v23 ; ZIP-NEXT: vmv1r.v v12, v21 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv1r.v v13, v17 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv1r.v v14, v19 ; ZIP-NEXT: vsseg6e64.v v9, (a1) ; ZIP-NEXT: vmv1r.v v9, v24 ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vmv1r.v v10, v22 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v11, v20 ; ZIP-NEXT: add a4, a3, a2 ; ZIP-NEXT: vmv1r.v v12, v16 ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vmv1r.v v13, v18 ; ZIP-NEXT: vsseg6e64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v14, (a1) ; ZIP-NEXT: add a1, a6, a2 ; ZIP-NEXT: vl1re64.v v15, (a5) ; ZIP-NEXT: add a5, a1, a2 ; ZIP-NEXT: vl1re64.v v18, (a5) ; ZIP-NEXT: add a5, a5, a2 ; ZIP-NEXT: vl1re64.v v19, (a5) ; ZIP-NEXT: add a5, a4, a2 ; ZIP-NEXT: vl1re64.v v16, (a6) ; ZIP-NEXT: add a6, a5, a2 ; ZIP-NEXT: vl1re64.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v13, (a6) ; ZIP-NEXT: csrr a6, vlenb ; ZIP-NEXT: li a7, 12 ; ZIP-NEXT: mul a6, a6, a7 ; ZIP-NEXT: add a6, sp, a6 ; ZIP-NEXT: addi a6, a6, 64 ; ZIP-NEXT: vl1re64.v v17, (a1) ; ZIP-NEXT: vl1re64.v v10, (a4) ; ZIP-NEXT: vl1re64.v v11, (a5) ; ZIP-NEXT: vl1re64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v9, (a3) ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a6, a2 ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a6) ; ZIP-NEXT: vl8re64.v v16, (a2) ; ZIP-NEXT: vl8re64.v v8, (a6) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 12 x double> @llvm.vector.interleave6.nxv12f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5) ret <vscale x 12 x double> %res } define <vscale x 14 x half> @vector_interleave_nxv14f16_nxv2f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv14f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vle16.v v8, (a7) ; CHECK-NEXT: vle16.v v10, (a6) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: add a2, a7, a2 ; CHECK-NEXT: vle16.v v12, (a5) ; CHECK-NEXT: vsetvli a5, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v8, a1 ; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v11, (a2) ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v12, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v12, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv14f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5,
a2 ; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg7e16.v v8, (a0) ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vle16.v v8, (a7) ; ZVBB-NEXT: vle16.v v10, (a6) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: add a2, a7, a2 ; ZVBB-NEXT: vle16.v v12, (a5) ; ZVBB-NEXT: vsetvli a5, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v8, a1 ; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v11, (a2) ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v12, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v12, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 14 x half> @llvm.vector.interleave7.nxv14f16(<vscale x 2 x half> %v0, <vscale x 2 x half> %v1, <vscale x 2 x half> %v2, <vscale x 2 x half> %v3, <vscale x 2 x half> %v4, <vscale x 2 x half> %v5, <vscale x 2 x half> %v6) ret <vscale x 14 x half> %res } define <vscale x 28 x half> @vector_interleave_nxv28f16_nxv4f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv28f16_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v11, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: add a0, a3, a1 ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: vl1re16.v v12, (a3) ; CHECK-NEXT: vl1re16.v v13, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v14, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv28f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg7e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: add a0, a3, a1 ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: vl1re16.v v12, (a3) ; ZVBB-NEXT: vl1re16.v v13, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v14, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 28 x half> @llvm.vector.interleave7.nxv28f16(<vscale x 4 x half> %v0, <vscale x 4 x half> %v1, <vscale x 4 x half> %v2, <vscale x 4 x half> %v3, <vscale x 4 x half> %v4, <vscale x 4 x half> %v5, <vscale x 4 x half> %v6) ret <vscale x 28 x half> %res } define <vscale x 56 x half> @vector_interleave_nxv56f16_nxv8f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) nounwind { ; RV32-LABEL: vector_interleave_nxv56f16_nxv8f16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr
a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e16.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e16.v v21, (a1) ; RV32-NEXT: vl1re16.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v11, (a6) ; RV32-NEXT: vl1re16.v v8, (a0) ; RV32-NEXT: vl1re16.v v16, (a4) ; RV32-NEXT: vl1re16.v v9, (a3) ; RV32-NEXT: vl1re16.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re16.v v14, (a6) ; RV32-NEXT: vl1re16.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re16.v v16, (a2) ; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv56f16_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e16.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e16.v v21, (a1) ; RV64-NEXT: vl1re16.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: 
add a6, a6, a2 ; RV64-NEXT: vl1re16.v v11, (a6) ; RV64-NEXT: vl1re16.v v8, (a0) ; RV64-NEXT: vl1re16.v v16, (a4) ; RV64-NEXT: vl1re16.v v9, (a3) ; RV64-NEXT: vl1re16.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re16.v v14, (a6) ; RV64-NEXT: vl1re16.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re16.v v16, (a2) ; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv56f16_nxv8f16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: 
vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv56f16_nxv8f16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv56f16_nxv8f16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, 
e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e16.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e16.v v21, (a1) ; ZIP-NEXT: vl1re16.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v11, (a6) ; ZIP-NEXT: vl1re16.v v8, (a0) ; ZIP-NEXT: vl1re16.v v16, (a4) ; ZIP-NEXT: vl1re16.v v9, (a3) ; ZIP-NEXT: vl1re16.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re16.v v14, (a6) ; ZIP-NEXT: vl1re16.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re16.v v16, (a2) ; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 56 x half> @llvm.vector.interleave7.nxv56f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x half> %v4, <vscale x 8 x half> %v5, <vscale x 8 x half> %v6) ret <vscale x 56 x half> %res } define <vscale x 14 x bfloat> @vector_interleave_nxv14bf16_nxv2bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv14bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: vsetvli a7, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vle16.v v8, (a7) ; CHECK-NEXT: vle16.v v10, (a6) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: add a2, a7, a2 ; CHECK-NEXT: vle16.v v12, (a5) ; CHECK-NEXT: vsetvli a5, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v8, a1 ; CHECK-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v11, (a2) ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v12, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v12, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL:
vector_interleave_nxv14bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5, a2 ; ZVBB-NEXT: vsetvli a7, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg7e16.v v8, (a0) ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vle16.v v8, (a7) ; ZVBB-NEXT: vle16.v v10, (a6) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: add a2, a7, a2 ; ZVBB-NEXT: vle16.v v12, (a5) ; ZVBB-NEXT: vsetvli a5, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v8, a1 ; ZVBB-NEXT: vsetvli a5, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v11, (a2) ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v12, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v12, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 14 x bfloat> @llvm.vector.interleave7.nxv14bf16(<vscale x 2 x bfloat> %v0, <vscale x 2 x bfloat> %v1, <vscale x 2 x bfloat> %v2, <vscale x 2 x bfloat> %v3, <vscale x 2 x bfloat> %v4, <vscale x 2 x bfloat> %v5, <vscale x 2 x bfloat> %v6) ret <vscale x 14 x bfloat> %res } define <vscale x 28 x bfloat> @vector_interleave_nxv28bf16_nxv4bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv28bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v11, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: add a0, a3, a1 ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: vl1re16.v v12, (a3) ; CHECK-NEXT: vl1re16.v v13, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re16.v v14, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv28bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg7e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v11, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: add a0, a3, a1 ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: vl1re16.v v12, (a3) ; ZVBB-NEXT: vl1re16.v v13, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re16.v v14, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 28 x bfloat> @llvm.vector.interleave7.nxv28bf16(<vscale x 4 x bfloat> %v0, <vscale x 4 x bfloat> %v1, <vscale x 4 x bfloat> %v2, <vscale x 4 x bfloat> %v3, <vscale x 4 x bfloat> %v4, <vscale x 4 x bfloat> %v5, <vscale x 4 x bfloat> %v6) ret <vscale x 28 x bfloat> %res } define <vscale x 56 x bfloat> @vector_interleave_nxv56bf16_nxv8bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) nounwind { ; RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra,
76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e16.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e16.v v21, (a1) ; RV32-NEXT: vl1re16.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re16.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v11, (a6) ; RV32-NEXT: vl1re16.v v8, (a0) ; RV32-NEXT: vl1re16.v v16, (a4) ; RV32-NEXT: vl1re16.v v9, (a3) ; RV32-NEXT: vl1re16.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re16.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re16.v v14, (a6) ; RV32-NEXT: vl1re16.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re16.v v16, (a2) ; RV32-NEXT: vl8re16.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: 
vsseg7e16.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e16.v v21, (a1) ; RV64-NEXT: vl1re16.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re16.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v11, (a6) ; RV64-NEXT: vl1re16.v v8, (a0) ; RV64-NEXT: vl1re16.v v16, (a4) ; RV64-NEXT: vl1re16.v v9, (a3) ; RV64-NEXT: vl1re16.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re16.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re16.v v14, (a6) ; RV64-NEXT: vl1re16.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re16.v v16, (a2) ; RV64-NEXT: vl8re16.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv56bf16_nxv8bf16: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v 
v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv56bf16_nxv8bf16: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e16.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e16.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re16.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re16.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re16.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re16.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re16.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re16.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re16.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re16.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re16.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re16.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; 
ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv56bf16_nxv8bf16: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e16.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e16.v v21, (a1) ; ZIP-NEXT: vl1re16.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re16.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v11, (a6) ; ZIP-NEXT: vl1re16.v v8, (a0) ; ZIP-NEXT: vl1re16.v v16, (a4) ; ZIP-NEXT: vl1re16.v v9, (a3) ; ZIP-NEXT: vl1re16.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re16.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re16.v v14, (a6) ; ZIP-NEXT: vl1re16.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re16.v v16, (a2) ; ZIP-NEXT: vl8re16.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 56 x bfloat> @llvm.vector.interleave7.nxv56bf16(<vscale x 8 x bfloat> %v0, <vscale x 8 x bfloat> %v1, <vscale x 8 x bfloat> %v2, <vscale x 8 x bfloat> %v3, <vscale x 8 x bfloat> %v4, <vscale x 8 x bfloat> %v5, <vscale x 8 x bfloat> %v6) ret <vscale x 56 x bfloat> %res } define <vscale x 7 x float> @vector_interleave_nxv7f32_nxv1f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv7f32_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: vsetvli a7, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vle32.v v8, (a7) ; CHECK-NEXT: vle32.v v10, (a6) ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: add a2, a7, a2 ; CHECK-NEXT: vle32.v v12, (a5) ; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v8, a1 ; CHECK-NEXT: vsetvli a5, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v11, (a2) ; CHECK-NEXT: vle32.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9,
v12, a1 ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v12, (a3) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv7f32_nxv1f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5, a2 ; ZVBB-NEXT: vsetvli a7, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vsseg7e32.v v8, (a0) ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vle32.v v8, (a7) ; ZVBB-NEXT: vle32.v v10, (a6) ; ZVBB-NEXT: srli a1, a1, 3 ; ZVBB-NEXT: add a2, a7, a2 ; ZVBB-NEXT: vle32.v v12, (a5) ; ZVBB-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v8, a1 ; ZVBB-NEXT: vsetvli a5, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v11, (a2) ; ZVBB-NEXT: vle32.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v12, a1 ; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v12, (a3) ; ZVBB-NEXT: vle32.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 7 x float> @llvm.vector.interleave7.nxv7f32(<vscale x 1 x float> %v0, <vscale x 1 x float> %v1, <vscale x 1 x float> %v2, <vscale x 1 x float> %v3, <vscale x 1 x float> %v4, <vscale x 1 x float> %v5, <vscale x 1 x float> %v6) ret <vscale x 7 x float> %res } define <vscale x 14 x float> @vector_interleave_nxv14f32_nxv2f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv14f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re32.v v11, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: add a0, a3, a1 ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: vl1re32.v v12, (a3) ; CHECK-NEXT: vl1re32.v v13, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re32.v v14, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv14f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e32, m1, ta, ma ; ZVBB-NEXT: vsseg7e32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re32.v v11, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: add a0, a3, a1 ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: vl1re32.v v12, (a3) ; ZVBB-NEXT: vl1re32.v v13, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re32.v v14, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ;
ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call <vscale x 14 x float> @llvm.vector.interleave7.nxv14f32(<vscale x 2 x float> %v0, <vscale x 2 x float> %v1, <vscale x 2 x float> %v2, <vscale x 2 x float> %v3, <vscale x 2 x float> %v4, <vscale x 2 x float> %v5, <vscale x 2 x float> %v6) ret <vscale x 14 x float> %res } define <vscale x 28 x float> @vector_interleave_nxv28f32_nxv4f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) nounwind { ; RV32-LABEL: vector_interleave_nxv28f32_nxv4f32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e32.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e32.v v21, (a1) ; RV32-NEXT: vl1re32.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re32.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v11, (a6) ; RV32-NEXT: vl1re32.v v8, (a0) ; RV32-NEXT: vl1re32.v v16, (a4) ; RV32-NEXT: vl1re32.v v9, (a3) ; RV32-NEXT: vl1re32.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re32.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re32.v v14, (a6) ; RV32-NEXT: vl1re32.v v15, (a1) ; RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re32.v v16, (a2) ; RV32-NEXT: vl8re32.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv28f32_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT:
add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e32.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e32.v v21, (a1) ; RV64-NEXT: vl1re32.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re32.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v11, (a6) ; RV64-NEXT: vl1re32.v v8, (a0) ; RV64-NEXT: vl1re32.v v16, (a4) ; RV64-NEXT: vl1re32.v v9, (a3) ; RV64-NEXT: vl1re32.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re32.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re32.v v14, (a6) ; RV64-NEXT: vl1re32.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re32.v v16, (a2) ; RV64-NEXT: vl8re32.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv28f32_nxv4f32: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e32.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e32.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re32.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v8, 
(a0) ; ZVBB-RV32-NEXT: vl1re32.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re32.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re32.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re32.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv28f32_nxv4f32: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; ZVBB-RV64-NEXT: vsseg7e32.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e32.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re32.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re32.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re32.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re32.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re32.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re32.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re32.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re32.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v 
v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re32.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re32.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv28f32_nxv4f32: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e32.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e32.v v21, (a1) ; ZIP-NEXT: vl1re32.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re32.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v11, (a6) ; ZIP-NEXT: vl1re32.v v8, (a0) ; ZIP-NEXT: vl1re32.v v16, (a4) ; ZIP-NEXT: vl1re32.v v9, (a3) ; ZIP-NEXT: vl1re32.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re32.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; ZIP-NEXT: vl1re32.v v14, (a6) ; ZIP-NEXT: vl1re32.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re32.v v16, (a2) ; ZIP-NEXT: vl8re32.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call <vscale x 28 x float> @llvm.vector.interleave7.nxv28f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x float> %v4, <vscale x 4 x float> %v5, <vscale x 4 x float> %v6) ret <vscale x 28 x float> %res } define <vscale x 7 x double> @vector_interleave_nxv7f64_nxv1f64(<vscale x 1 x double> %v0, <vscale x 1 x double> %v1, <vscale x 1 x double> %v2, <vscale x 1 x double> %v3, <vscale x 1 x double> %v4, <vscale x 1 x double> %v5, <vscale x 1 x double> %v6) nounwind { ; CHECK-LABEL: vector_interleave_nxv7f64_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v10, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re64.v v11, (a3) ; CHECK-NEXT: add a3, a3, a1 ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: add a0, a3, a1
; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: vl1re64.v v12, (a3) ; CHECK-NEXT: vl1re64.v v13, (a0) ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vl1re64.v v14, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a1, a0, 3 ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv7f64_nxv1f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: vsetvli a4, zero, e64, m1, ta, ma ; ZVBB-NEXT: vsseg7e64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v10, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re64.v v11, (a3) ; ZVBB-NEXT: add a3, a3, a1 ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: add a0, a3, a1 ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: vl1re64.v v12, (a3) ; ZVBB-NEXT: vl1re64.v v13, (a0) ; ZVBB-NEXT: add a0, a0, a1 ; ZVBB-NEXT: vl1re64.v v14, (a0) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a1, a0, 3 ; ZVBB-NEXT: sub a0, a1, a0 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave7.nxv7f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) ret %res } define @vector_interleave_nxv14f64_nxv2f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) nounwind { ; RV32-LABEL: vector_interleave_nxv14f64_nxv2f64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -80 ; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; RV32-NEXT: addi s0, sp, 80 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 ; RV32-NEXT: sub sp, sp, a0 ; RV32-NEXT: andi sp, sp, -64 ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vmv2r.v v26, v20 ; RV32-NEXT: addi a0, sp, 64 ; RV32-NEXT: vmv2r.v v24, v16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a2, a1, 3 ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 64 ; RV32-NEXT: vmv2r.v v22, v12 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: vmv2r.v v20, v8 ; RV32-NEXT: vmv1r.v v1, v20 ; RV32-NEXT: vmv1r.v v3, v22 ; RV32-NEXT: vmv1r.v v5, v24 ; RV32-NEXT: vmv1r.v v7, v26 ; RV32-NEXT: add a3, a0, a2 ; RV32-NEXT: vmv1r.v v2, v10 ; RV32-NEXT: add a4, a1, a2 ; RV32-NEXT: slli a5, a2, 2 ; RV32-NEXT: vmv1r.v v4, v14 ; RV32-NEXT: slli a6, a2, 4 ; RV32-NEXT: add a7, a4, a2 ; RV32-NEXT: vmv1r.v v6, v18 ; RV32-NEXT: sub a5, a6, a5 ; RV32-NEXT: vmv1r.v v22, v11 ; RV32-NEXT: add a6, a7, a2 ; RV32-NEXT: vmv1r.v v24, v15 ; RV32-NEXT: vsseg7e64.v v1, (a0) ; RV32-NEXT: vmv1r.v v26, v19 ; RV32-NEXT: vsseg7e64.v v21, (a1) ; RV32-NEXT: vl1re64.v v18, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v19, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v20, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v21, (a6) ; RV32-NEXT: add a6, a3, a2 ; RV32-NEXT: vl1re64.v v10, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v11, (a6) ; RV32-NEXT: vl1re64.v v8, (a0) ; RV32-NEXT: vl1re64.v v16, (a4) ; RV32-NEXT: vl1re64.v v9, (a3) ; RV32-NEXT: vl1re64.v v17, (a7) ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a3, 14 ; RV32-NEXT: mul a0, a0, a3 ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 64 ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v12, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: vl1re64.v v13, (a6) ; RV32-NEXT: add a6, a6, a2 ; RV32-NEXT: slli a2, a2, 3 ; RV32-NEXT: add a2, a0, a2 ; RV32-NEXT: vl1re64.v v14, (a6) ; RV32-NEXT: vl1re64.v v15, (a1) ; 
RV32-NEXT: add a5, a0, a5 ; RV32-NEXT: vs2r.v v20, (a5) ; RV32-NEXT: vs4r.v v16, (a2) ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: vl8re64.v v16, (a2) ; RV32-NEXT: vl8re64.v v8, (a0) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 80 ; RV32-NEXT: ret ; ; RV64-LABEL: vector_interleave_nxv14f64_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -80 ; RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; RV64-NEXT: addi s0, sp, 80 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 ; RV64-NEXT: sub sp, sp, a0 ; RV64-NEXT: andi sp, sp, -64 ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv2r.v v26, v20 ; RV64-NEXT: addi a0, sp, 64 ; RV64-NEXT: vmv2r.v v24, v16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a2, a1, 3 ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: add a1, sp, a1 ; RV64-NEXT: addi a1, a1, 64 ; RV64-NEXT: vmv2r.v v22, v12 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: vmv2r.v v20, v8 ; RV64-NEXT: vmv1r.v v1, v20 ; RV64-NEXT: vmv1r.v v3, v22 ; RV64-NEXT: vmv1r.v v5, v24 ; RV64-NEXT: vmv1r.v v7, v26 ; RV64-NEXT: add a3, a0, a2 ; RV64-NEXT: vmv1r.v v2, v10 ; RV64-NEXT: add a4, a1, a2 ; RV64-NEXT: slli a5, a2, 2 ; RV64-NEXT: vmv1r.v v4, v14 ; RV64-NEXT: slli a6, a2, 4 ; RV64-NEXT: add a7, a4, a2 ; RV64-NEXT: vmv1r.v v6, v18 ; RV64-NEXT: sub a5, a6, a5 ; RV64-NEXT: vmv1r.v v22, v11 ; RV64-NEXT: add a6, a7, a2 ; RV64-NEXT: vmv1r.v v24, v15 ; RV64-NEXT: vsseg7e64.v v1, (a0) ; RV64-NEXT: vmv1r.v v26, v19 ; RV64-NEXT: vsseg7e64.v v21, (a1) ; RV64-NEXT: vl1re64.v v18, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v19, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v20, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v21, (a6) ; RV64-NEXT: add a6, a3, a2 ; RV64-NEXT: vl1re64.v v10, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v11, (a6) ; RV64-NEXT: vl1re64.v v8, (a0) ; RV64-NEXT: vl1re64.v v16, (a4) ; RV64-NEXT: vl1re64.v v9, (a3) ; RV64-NEXT: vl1re64.v v17, (a7) ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a3, 14 ; RV64-NEXT: mul a0, a0, a3 ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 64 ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v12, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: vl1re64.v v13, (a6) ; RV64-NEXT: add a6, a6, a2 ; RV64-NEXT: slli a2, a2, 3 ; RV64-NEXT: add a2, a0, a2 ; RV64-NEXT: vl1re64.v v14, (a6) ; RV64-NEXT: vl1re64.v v15, (a1) ; RV64-NEXT: add a5, a0, a5 ; RV64-NEXT: vs2r.v v20, (a5) ; RV64-NEXT: vs4r.v v16, (a2) ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: vl8re64.v v16, (a2) ; RV64-NEXT: vl8re64.v v8, (a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 80 ; RV64-NEXT: ret ; ; ZVBB-RV32-LABEL: vector_interleave_nxv14f64_nxv2f64: ; ZVBB-RV32: # %bb.0: ; ZVBB-RV32-NEXT: addi sp, sp, -80 ; ZVBB-RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill ; ZVBB-RV32-NEXT: addi s0, sp, 80 ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: slli a0, a0, 5 ; ZVBB-RV32-NEXT: sub sp, sp, a0 ; ZVBB-RV32-NEXT: andi sp, sp, -64 ; ZVBB-RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV32-NEXT: vmv2r.v v26, v20 ; ZVBB-RV32-NEXT: addi a0, sp, 64 ; ZVBB-RV32-NEXT: vmv2r.v v24, v16 ; ZVBB-RV32-NEXT: csrr a1, vlenb ; ZVBB-RV32-NEXT: slli a2, a1, 3 ; ZVBB-RV32-NEXT: sub a1, a2, a1 ; ZVBB-RV32-NEXT: add a1, sp, a1 ; ZVBB-RV32-NEXT: addi 
a1, a1, 64 ; ZVBB-RV32-NEXT: vmv2r.v v22, v12 ; ZVBB-RV32-NEXT: csrr a2, vlenb ; ZVBB-RV32-NEXT: vmv2r.v v20, v8 ; ZVBB-RV32-NEXT: vmv1r.v v1, v20 ; ZVBB-RV32-NEXT: vmv1r.v v3, v22 ; ZVBB-RV32-NEXT: vmv1r.v v5, v24 ; ZVBB-RV32-NEXT: vmv1r.v v7, v26 ; ZVBB-RV32-NEXT: add a3, a0, a2 ; ZVBB-RV32-NEXT: vmv1r.v v2, v10 ; ZVBB-RV32-NEXT: add a4, a1, a2 ; ZVBB-RV32-NEXT: slli a5, a2, 2 ; ZVBB-RV32-NEXT: vmv1r.v v4, v14 ; ZVBB-RV32-NEXT: slli a6, a2, 4 ; ZVBB-RV32-NEXT: add a7, a4, a2 ; ZVBB-RV32-NEXT: vmv1r.v v6, v18 ; ZVBB-RV32-NEXT: sub a5, a6, a5 ; ZVBB-RV32-NEXT: vmv1r.v v22, v11 ; ZVBB-RV32-NEXT: add a6, a7, a2 ; ZVBB-RV32-NEXT: vmv1r.v v24, v15 ; ZVBB-RV32-NEXT: vsseg7e64.v v1, (a0) ; ZVBB-RV32-NEXT: vmv1r.v v26, v19 ; ZVBB-RV32-NEXT: vsseg7e64.v v21, (a1) ; ZVBB-RV32-NEXT: vl1re64.v v18, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v19, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v20, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v21, (a6) ; ZVBB-RV32-NEXT: add a6, a3, a2 ; ZVBB-RV32-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV32-NEXT: vl1re64.v v16, (a4) ; ZVBB-RV32-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV32-NEXT: vl1re64.v v17, (a7) ; ZVBB-RV32-NEXT: csrr a0, vlenb ; ZVBB-RV32-NEXT: li a3, 14 ; ZVBB-RV32-NEXT: mul a0, a0, a3 ; ZVBB-RV32-NEXT: add a0, sp, a0 ; ZVBB-RV32-NEXT: addi a0, a0, 64 ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV32-NEXT: add a6, a6, a2 ; ZVBB-RV32-NEXT: slli a2, a2, 3 ; ZVBB-RV32-NEXT: add a2, a0, a2 ; ZVBB-RV32-NEXT: vl1re64.v v14, (a6) ; ZVBB-RV32-NEXT: vl1re64.v v15, (a1) ; ZVBB-RV32-NEXT: add a5, a0, a5 ; ZVBB-RV32-NEXT: vs2r.v v20, (a5) ; ZVBB-RV32-NEXT: vs4r.v v16, (a2) ; ZVBB-RV32-NEXT: vs8r.v v8, (a0) ; ZVBB-RV32-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV32-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV32-NEXT: addi sp, s0, -80 ; ZVBB-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload ; ZVBB-RV32-NEXT: addi sp, sp, 80 ; ZVBB-RV32-NEXT: ret ; ; ZVBB-RV64-LABEL: vector_interleave_nxv14f64_nxv2f64: ; ZVBB-RV64: # %bb.0: ; ZVBB-RV64-NEXT: addi sp, sp, -80 ; ZVBB-RV64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZVBB-RV64-NEXT: addi s0, sp, 80 ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: slli a0, a0, 5 ; ZVBB-RV64-NEXT: sub sp, sp, a0 ; ZVBB-RV64-NEXT: andi sp, sp, -64 ; ZVBB-RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-RV64-NEXT: vmv2r.v v26, v20 ; ZVBB-RV64-NEXT: addi a0, sp, 64 ; ZVBB-RV64-NEXT: vmv2r.v v24, v16 ; ZVBB-RV64-NEXT: csrr a1, vlenb ; ZVBB-RV64-NEXT: slli a2, a1, 3 ; ZVBB-RV64-NEXT: sub a1, a2, a1 ; ZVBB-RV64-NEXT: add a1, sp, a1 ; ZVBB-RV64-NEXT: addi a1, a1, 64 ; ZVBB-RV64-NEXT: vmv2r.v v22, v12 ; ZVBB-RV64-NEXT: csrr a2, vlenb ; ZVBB-RV64-NEXT: vmv2r.v v20, v8 ; ZVBB-RV64-NEXT: vmv1r.v v1, v20 ; ZVBB-RV64-NEXT: vmv1r.v v3, v22 ; ZVBB-RV64-NEXT: vmv1r.v v5, v24 ; ZVBB-RV64-NEXT: vmv1r.v v7, v26 ; ZVBB-RV64-NEXT: add a3, a0, a2 ; ZVBB-RV64-NEXT: vmv1r.v v2, v10 ; ZVBB-RV64-NEXT: add a4, a1, a2 ; ZVBB-RV64-NEXT: slli a5, a2, 2 ; ZVBB-RV64-NEXT: vmv1r.v v4, v14 ; ZVBB-RV64-NEXT: slli a6, a2, 4 ; ZVBB-RV64-NEXT: add a7, a4, a2 ; ZVBB-RV64-NEXT: vmv1r.v v6, v18 ; ZVBB-RV64-NEXT: sub a5, a6, a5 ; ZVBB-RV64-NEXT: vmv1r.v v22, v11 ; ZVBB-RV64-NEXT: add a6, a7, a2 ; ZVBB-RV64-NEXT: vmv1r.v v24, v15 ; 
ZVBB-RV64-NEXT: vsseg7e64.v v1, (a0) ; ZVBB-RV64-NEXT: vmv1r.v v26, v19 ; ZVBB-RV64-NEXT: vsseg7e64.v v21, (a1) ; ZVBB-RV64-NEXT: vl1re64.v v18, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v19, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v20, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v21, (a6) ; ZVBB-RV64-NEXT: add a6, a3, a2 ; ZVBB-RV64-NEXT: vl1re64.v v10, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v11, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v8, (a0) ; ZVBB-RV64-NEXT: vl1re64.v v16, (a4) ; ZVBB-RV64-NEXT: vl1re64.v v9, (a3) ; ZVBB-RV64-NEXT: vl1re64.v v17, (a7) ; ZVBB-RV64-NEXT: csrr a0, vlenb ; ZVBB-RV64-NEXT: li a3, 14 ; ZVBB-RV64-NEXT: mul a0, a0, a3 ; ZVBB-RV64-NEXT: add a0, sp, a0 ; ZVBB-RV64-NEXT: addi a0, a0, 64 ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v12, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: vl1re64.v v13, (a6) ; ZVBB-RV64-NEXT: add a6, a6, a2 ; ZVBB-RV64-NEXT: slli a2, a2, 3 ; ZVBB-RV64-NEXT: add a2, a0, a2 ; ZVBB-RV64-NEXT: vl1re64.v v14, (a6) ; ZVBB-RV64-NEXT: vl1re64.v v15, (a1) ; ZVBB-RV64-NEXT: add a5, a0, a5 ; ZVBB-RV64-NEXT: vs2r.v v20, (a5) ; ZVBB-RV64-NEXT: vs4r.v v16, (a2) ; ZVBB-RV64-NEXT: vs8r.v v8, (a0) ; ZVBB-RV64-NEXT: vl8re64.v v16, (a2) ; ZVBB-RV64-NEXT: vl8re64.v v8, (a0) ; ZVBB-RV64-NEXT: addi sp, s0, -80 ; ZVBB-RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZVBB-RV64-NEXT: addi sp, sp, 80 ; ZVBB-RV64-NEXT: ret ; ; ZIP-LABEL: vector_interleave_nxv14f64_nxv2f64: ; ZIP: # %bb.0: ; ZIP-NEXT: addi sp, sp, -80 ; ZIP-NEXT: sd ra, 72(sp) # 8-byte Folded Spill ; ZIP-NEXT: sd s0, 64(sp) # 8-byte Folded Spill ; ZIP-NEXT: addi s0, sp, 80 ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: slli a0, a0, 5 ; ZIP-NEXT: sub sp, sp, a0 ; ZIP-NEXT: andi sp, sp, -64 ; ZIP-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZIP-NEXT: vmv2r.v v26, v20 ; ZIP-NEXT: addi a0, sp, 64 ; ZIP-NEXT: vmv2r.v v24, v16 ; ZIP-NEXT: csrr a1, vlenb ; ZIP-NEXT: slli a2, a1, 3 ; ZIP-NEXT: sub a1, a2, a1 ; ZIP-NEXT: add a1, sp, a1 ; ZIP-NEXT: addi a1, a1, 64 ; ZIP-NEXT: vmv2r.v v22, v12 ; ZIP-NEXT: csrr a2, vlenb ; ZIP-NEXT: vmv2r.v v20, v8 ; ZIP-NEXT: vmv1r.v v1, v20 ; ZIP-NEXT: vmv1r.v v3, v22 ; ZIP-NEXT: vmv1r.v v5, v24 ; ZIP-NEXT: vmv1r.v v7, v26 ; ZIP-NEXT: add a3, a0, a2 ; ZIP-NEXT: vmv1r.v v2, v10 ; ZIP-NEXT: add a4, a1, a2 ; ZIP-NEXT: slli a5, a2, 2 ; ZIP-NEXT: vmv1r.v v4, v14 ; ZIP-NEXT: slli a6, a2, 4 ; ZIP-NEXT: add a7, a4, a2 ; ZIP-NEXT: vmv1r.v v6, v18 ; ZIP-NEXT: sub a5, a6, a5 ; ZIP-NEXT: vmv1r.v v22, v11 ; ZIP-NEXT: add a6, a7, a2 ; ZIP-NEXT: vmv1r.v v24, v15 ; ZIP-NEXT: vsseg7e64.v v1, (a0) ; ZIP-NEXT: vmv1r.v v26, v19 ; ZIP-NEXT: vsseg7e64.v v21, (a1) ; ZIP-NEXT: vl1re64.v v18, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v19, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v20, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v21, (a6) ; ZIP-NEXT: add a6, a3, a2 ; ZIP-NEXT: vl1re64.v v10, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v11, (a6) ; ZIP-NEXT: vl1re64.v v8, (a0) ; ZIP-NEXT: vl1re64.v v16, (a4) ; ZIP-NEXT: vl1re64.v v9, (a3) ; ZIP-NEXT: vl1re64.v v17, (a7) ; ZIP-NEXT: csrr a0, vlenb ; ZIP-NEXT: li a3, 14 ; ZIP-NEXT: mul a0, a0, a3 ; ZIP-NEXT: add a0, sp, a0 ; ZIP-NEXT: addi a0, a0, 64 ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v12, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: vl1re64.v v13, (a6) ; ZIP-NEXT: add a6, a6, a2 ; ZIP-NEXT: slli a2, a2, 3 ; ZIP-NEXT: add a2, a0, a2 ; 
ZIP-NEXT: vl1re64.v v14, (a6) ; ZIP-NEXT: vl1re64.v v15, (a1) ; ZIP-NEXT: add a5, a0, a5 ; ZIP-NEXT: vs2r.v v20, (a5) ; ZIP-NEXT: vs4r.v v16, (a2) ; ZIP-NEXT: vs8r.v v8, (a0) ; ZIP-NEXT: vl8re64.v v16, (a2) ; ZIP-NEXT: vl8re64.v v8, (a0) ; ZIP-NEXT: addi sp, s0, -80 ; ZIP-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; ZIP-NEXT: ld s0, 64(sp) # 8-byte Folded Reload ; ZIP-NEXT: addi sp, sp, 80 ; ZIP-NEXT: ret %res = call @llvm.vector.interleave7.nxv14f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6) ret %res } define @vector_interleave_nxv16f16_nxv2f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv16f16_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vsetvli t0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: add t0, a7, a2 ; CHECK-NEXT: add a2, t0, a2 ; CHECK-NEXT: vle16.v v11, (t0) ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vle16.v v9, (a7) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v11, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a6) ; CHECK-NEXT: vle16.v v8, (a5) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v9, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v12, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f16_nxv2f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5, a2 ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vsetvli t0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg8e16.v v8, (a0) ; ZVBB-NEXT: add t0, a7, a2 ; ZVBB-NEXT: add a2, t0, a2 ; ZVBB-NEXT: vle16.v v11, (t0) ; ZVBB-NEXT: vle16.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vle16.v v9, (a7) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v11, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a6) ; ZVBB-NEXT: vle16.v v8, (a5) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v9, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v12, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv16f16( %v0, %v1, %v2, 
%v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv32f16_nxv4f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv32f16_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: add a4, a3, a1 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: add a6, a5, a1 ; CHECK-NEXT: add a7, a6, a1 ; CHECK-NEXT: vsetvli t0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v14, (a7) ; CHECK-NEXT: add a1, a7, a1 ; CHECK-NEXT: vl1re16.v v15, (a1) ; CHECK-NEXT: vl1re16.v v12, (a5) ; CHECK-NEXT: vl1re16.v v13, (a6) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: vl1re16.v v11, (a4) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32f16_nxv4f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: add a4, a3, a1 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: add a6, a5, a1 ; ZVBB-NEXT: add a7, a6, a1 ; ZVBB-NEXT: vsetvli t0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg8e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v14, (a7) ; ZVBB-NEXT: add a1, a7, a1 ; ZVBB-NEXT: vl1re16.v v15, (a1) ; ZVBB-NEXT: vl1re16.v v12, (a5) ; ZVBB-NEXT: vl1re16.v v13, (a6) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: vl1re16.v v11, (a4) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv32f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv64f16_nxv8f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv64f16_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e16.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e16.v v22, (a1) ; CHECK-NEXT: vl1re16.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; 
CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re16.v v22, (t6) ; CHECK-NEXT: vl1re16.v v15, (t5) ; CHECK-NEXT: vl1re16.v v23, (a3) ; CHECK-NEXT: vl1re16.v v12, (t1) ; CHECK-NEXT: vl1re16.v v20, (t2) ; CHECK-NEXT: vl1re16.v v13, (t3) ; CHECK-NEXT: vl1re16.v v21, (t4) ; CHECK-NEXT: vl1re16.v v10, (a5) ; CHECK-NEXT: vl1re16.v v18, (a6) ; CHECK-NEXT: vl1re16.v v11, (a7) ; CHECK-NEXT: vl1re16.v v19, (t0) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v16, (a1) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: vl1re16.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv64f16_nxv8f16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e16.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e16.v v22, (a1) ; ZVBB-NEXT: vl1re16.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re16.v v22, (t6) ; ZVBB-NEXT: vl1re16.v v15, (t5) ; ZVBB-NEXT: vl1re16.v v23, (a3) ; ZVBB-NEXT: vl1re16.v v12, (t1) ; ZVBB-NEXT: vl1re16.v v20, (t2) ; ZVBB-NEXT: vl1re16.v v13, (t3) ; ZVBB-NEXT: vl1re16.v v21, (t4) ; ZVBB-NEXT: vl1re16.v v10, (a5) ; ZVBB-NEXT: vl1re16.v v18, (a6) ; ZVBB-NEXT: vl1re16.v v11, (a7) ; ZVBB-NEXT: vl1re16.v v19, (t0) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v16, (a1) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: vl1re16.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv64f16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv16bf16_nxv2bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv16bf16_nxv2bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vsetvli t0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: add t0, a7, a2 ; CHECK-NEXT: add a2, t0, a2 ; CHECK-NEXT: vle16.v v11, (t0) ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 2 ; CHECK-NEXT: vle16.v v9, (a7) ; CHECK-NEXT: 
vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v11, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a6) ; CHECK-NEXT: vle16.v v8, (a5) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v9, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v12, (a3) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv2bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5, a2 ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vsetvli t0, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vsseg8e16.v v8, (a0) ; ZVBB-NEXT: add t0, a7, a2 ; ZVBB-NEXT: add a2, t0, a2 ; ZVBB-NEXT: vle16.v v11, (t0) ; ZVBB-NEXT: vle16.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 2 ; ZVBB-NEXT: vle16.v v9, (a7) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v11, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v10, (a6) ; ZVBB-NEXT: vle16.v v8, (a5) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v9, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; ZVBB-NEXT: vle16.v v12, (a3) ; ZVBB-NEXT: vle16.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv16bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv32bf16_nxv4bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv32bf16_nxv4bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: add a4, a3, a1 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: add a6, a5, a1 ; CHECK-NEXT: add a7, a6, a1 ; CHECK-NEXT: vsetvli t0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v14, (a7) ; CHECK-NEXT: add a1, a7, a1 ; CHECK-NEXT: vl1re16.v v15, (a1) ; CHECK-NEXT: vl1re16.v v12, (a5) ; CHECK-NEXT: vl1re16.v v13, (a6) ; CHECK-NEXT: vl1re16.v v10, (a3) ; CHECK-NEXT: vl1re16.v v11, (a4) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32bf16_nxv4bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; 
ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: add a4, a3, a1 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: add a6, a5, a1 ; ZVBB-NEXT: add a7, a6, a1 ; ZVBB-NEXT: vsetvli t0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vsseg8e16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v14, (a7) ; ZVBB-NEXT: add a1, a7, a1 ; ZVBB-NEXT: vl1re16.v v15, (a1) ; ZVBB-NEXT: vl1re16.v v12, (a5) ; ZVBB-NEXT: vl1re16.v v13, (a6) ; ZVBB-NEXT: vl1re16.v v10, (a3) ; ZVBB-NEXT: vl1re16.v v11, (a4) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv32bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv64bf16_nxv8bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv64bf16_nxv8bf16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e16.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e16.v v22, (a1) ; CHECK-NEXT: vl1re16.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re16.v v22, (t6) ; CHECK-NEXT: vl1re16.v v15, (t5) ; CHECK-NEXT: vl1re16.v v23, (a3) ; CHECK-NEXT: vl1re16.v v12, (t1) ; CHECK-NEXT: vl1re16.v v20, (t2) ; CHECK-NEXT: vl1re16.v v13, (t3) ; CHECK-NEXT: vl1re16.v v21, (t4) ; CHECK-NEXT: vl1re16.v v10, (a5) ; CHECK-NEXT: vl1re16.v v18, (a6) ; CHECK-NEXT: vl1re16.v v11, (a7) ; CHECK-NEXT: vl1re16.v v19, (t0) ; CHECK-NEXT: vl1re16.v v8, (a0) ; CHECK-NEXT: vl1re16.v v16, (a1) ; CHECK-NEXT: vl1re16.v v9, (a2) ; CHECK-NEXT: vl1re16.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv8bf16: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; 
ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e16.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e16.v v22, (a1) ; ZVBB-NEXT: vl1re16.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re16.v v22, (t6) ; ZVBB-NEXT: vl1re16.v v15, (t5) ; ZVBB-NEXT: vl1re16.v v23, (a3) ; ZVBB-NEXT: vl1re16.v v12, (t1) ; ZVBB-NEXT: vl1re16.v v20, (t2) ; ZVBB-NEXT: vl1re16.v v13, (t3) ; ZVBB-NEXT: vl1re16.v v21, (t4) ; ZVBB-NEXT: vl1re16.v v10, (a5) ; ZVBB-NEXT: vl1re16.v v18, (a6) ; ZVBB-NEXT: vl1re16.v v11, (a7) ; ZVBB-NEXT: vl1re16.v v19, (t0) ; ZVBB-NEXT: vl1re16.v v8, (a0) ; ZVBB-NEXT: vl1re16.v v16, (a1) ; ZVBB-NEXT: vl1re16.v v9, (a2) ; ZVBB-NEXT: vl1re16.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv64bf16( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv8f32_nxv1f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v8) nounwind { ; CHECK-LABEL: vector_interleave_nxv8f32_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 1 ; CHECK-NEXT: add a3, a0, a2 ; CHECK-NEXT: add a4, a3, a2 ; CHECK-NEXT: add a5, a4, a2 ; CHECK-NEXT: add a6, a5, a2 ; CHECK-NEXT: add a7, a6, a2 ; CHECK-NEXT: vsetvli t0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: add t0, a7, a2 ; CHECK-NEXT: add a2, t0, a2 ; CHECK-NEXT: vle32.v v11, (t0) ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: vle32.v v9, (a7) ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v11, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v10, (a6) ; CHECK-NEXT: vle32.v v8, (a5) ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v10, v9, a1 ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a4) ; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 ; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v12, (a3) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f32_nxv1f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: srli a2, a1, 1 ; ZVBB-NEXT: add a3, a0, a2 ; ZVBB-NEXT: add a4, a3, a2 ; ZVBB-NEXT: add a5, a4, a2 ; ZVBB-NEXT: add a6, a5, a2 ; ZVBB-NEXT: add a7, a6, a2 ; ZVBB-NEXT: vsetvli t0, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vsseg8e32.v v8, (a0) ; ZVBB-NEXT: add t0, 
a7, a2 ; ZVBB-NEXT: add a2, t0, a2 ; ZVBB-NEXT: vle32.v v11, (t0) ; ZVBB-NEXT: vle32.v v8, (a2) ; ZVBB-NEXT: srli a1, a1, 3 ; ZVBB-NEXT: vle32.v v9, (a7) ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v11, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v10, (a6) ; ZVBB-NEXT: vle32.v v8, (a5) ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v10, v9, a1 ; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v9, (a4) ; ZVBB-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v9, v8, a1 ; ZVBB-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; ZVBB-NEXT: vle32.v v12, (a3) ; ZVBB-NEXT: vle32.v v8, (a0) ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vslideup.vx v8, v12, a1 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 2 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv8f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v8) ret %res } define @vector_interleave_nxv16f32_nxv2f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv16f32_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: add a4, a3, a1 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: add a6, a5, a1 ; CHECK-NEXT: add a7, a6, a1 ; CHECK-NEXT: vsetvli t0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v14, (a7) ; CHECK-NEXT: add a1, a7, a1 ; CHECK-NEXT: vl1re32.v v15, (a1) ; CHECK-NEXT: vl1re32.v v12, (a5) ; CHECK-NEXT: vl1re32.v v13, (a6) ; CHECK-NEXT: vl1re32.v v10, (a3) ; CHECK-NEXT: vl1re32.v v11, (a4) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f32_nxv2f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: add a4, a3, a1 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: add a6, a5, a1 ; ZVBB-NEXT: add a7, a6, a1 ; ZVBB-NEXT: vsetvli t0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vsseg8e32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v14, (a7) ; ZVBB-NEXT: add a1, a7, a1 ; ZVBB-NEXT: vl1re32.v v15, (a1) ; ZVBB-NEXT: vl1re32.v v12, (a5) ; ZVBB-NEXT: vl1re32.v v13, (a6) ; ZVBB-NEXT: vl1re32.v v10, (a3) ; ZVBB-NEXT: vl1re32.v v11, (a4) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv16f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv32f32_nxv4f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv32f32_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, 
sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e32.v v1, (a0) ; CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e32.v v22, (a1) ; CHECK-NEXT: vl1re32.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re32.v v22, (t6) ; CHECK-NEXT: vl1re32.v v15, (t5) ; CHECK-NEXT: vl1re32.v v23, (a3) ; CHECK-NEXT: vl1re32.v v12, (t1) ; CHECK-NEXT: vl1re32.v v20, (t2) ; CHECK-NEXT: vl1re32.v v13, (t3) ; CHECK-NEXT: vl1re32.v v21, (t4) ; CHECK-NEXT: vl1re32.v v10, (a5) ; CHECK-NEXT: vl1re32.v v18, (a6) ; CHECK-NEXT: vl1re32.v v11, (a7) ; CHECK-NEXT: vl1re32.v v19, (t0) ; CHECK-NEXT: vl1re32.v v8, (a0) ; CHECK-NEXT: vl1re32.v v16, (a1) ; CHECK-NEXT: vl1re32.v v9, (a2) ; CHECK-NEXT: vl1re32.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv32f32_nxv4f32: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e32.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e32.v v22, (a1) ; ZVBB-NEXT: vl1re32.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re32.v v22, (t6) ; ZVBB-NEXT: vl1re32.v v15, (t5) ; ZVBB-NEXT: vl1re32.v v23, (a3) ; ZVBB-NEXT: vl1re32.v v12, (t1) ; ZVBB-NEXT: vl1re32.v v20, (t2) ; ZVBB-NEXT: vl1re32.v v13, (t3) ; ZVBB-NEXT: vl1re32.v v21, (t4) ; ZVBB-NEXT: vl1re32.v v10, (a5) ; ZVBB-NEXT: vl1re32.v v18, (a6) ; ZVBB-NEXT: vl1re32.v v11, (a7) ; ZVBB-NEXT: vl1re32.v v19, (t0) ; ZVBB-NEXT: vl1re32.v v8, (a0) ; ZVBB-NEXT: vl1re32.v v16, (a1) ; ZVBB-NEXT: vl1re32.v v9, (a2) ; ZVBB-NEXT: vl1re32.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 
4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv32f32( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res } define @vector_interleave_nxv8f64_nxv1f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v8) nounwind { ; CHECK-LABEL: vector_interleave_nxv8f64_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: add a2, a0, a1 ; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: add a4, a3, a1 ; CHECK-NEXT: add a5, a4, a1 ; CHECK-NEXT: add a6, a5, a1 ; CHECK-NEXT: add a7, a6, a1 ; CHECK-NEXT: vsetvli t0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v14, (a7) ; CHECK-NEXT: add a1, a7, a1 ; CHECK-NEXT: vl1re64.v v15, (a1) ; CHECK-NEXT: vl1re64.v v12, (a5) ; CHECK-NEXT: vl1re64.v v13, (a6) ; CHECK-NEXT: vl1re64.v v10, (a3) ; CHECK-NEXT: vl1re64.v v11, (a4) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv8f64_nxv1f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: add a2, a0, a1 ; ZVBB-NEXT: add a3, a2, a1 ; ZVBB-NEXT: add a4, a3, a1 ; ZVBB-NEXT: add a5, a4, a1 ; ZVBB-NEXT: add a6, a5, a1 ; ZVBB-NEXT: add a7, a6, a1 ; ZVBB-NEXT: vsetvli t0, zero, e64, m1, ta, ma ; ZVBB-NEXT: vsseg8e64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v14, (a7) ; ZVBB-NEXT: add a1, a7, a1 ; ZVBB-NEXT: vl1re64.v v15, (a1) ; ZVBB-NEXT: vl1re64.v v12, (a5) ; ZVBB-NEXT: vl1re64.v v13, (a6) ; ZVBB-NEXT: vl1re64.v v10, (a3) ; ZVBB-NEXT: vl1re64.v v11, (a4) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 3 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv8f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v8) ret %res } define @vector_interleave_nxv16f64_nxv2f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) nounwind { ; CHECK-LABEL: vector_interleave_nxv16f64_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: sub sp, sp, a0 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv2r.v v28, v22 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vmv2r.v v26, v18 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: add a2, a0, a3 ; CHECK-NEXT: add a4, a1, a3 ; CHECK-NEXT: add a5, a2, a3 ; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv2r.v v24, v14 ; CHECK-NEXT: add a6, a4, a3 ; CHECK-NEXT: vmv2r.v v22, v10 ; CHECK-NEXT: vmv1r.v v2, v22 ; CHECK-NEXT: add a7, a5, a3 ; CHECK-NEXT: vmv1r.v v3, v12 ; CHECK-NEXT: add t0, a6, a3 ; CHECK-NEXT: vmv1r.v v4, v24 ; CHECK-NEXT: add t1, a7, a3 ; CHECK-NEXT: vmv1r.v v5, v16 ; CHECK-NEXT: add t2, t0, a3 ; CHECK-NEXT: vmv1r.v v6, v26 ; CHECK-NEXT: add t3, t1, a3 ; CHECK-NEXT: vmv1r.v v7, v20 ; CHECK-NEXT: add t4, t2, a3 ; CHECK-NEXT: vmv1r.v v8, v28 ; CHECK-NEXT: vmv1r.v v22, v9 ; CHECK-NEXT: add t5, t3, a3 ; CHECK-NEXT: vmv1r.v v24, v13 ; CHECK-NEXT: add t6, t4, a3 ; CHECK-NEXT: vmv1r.v v26, v17 ; CHECK-NEXT: vsseg8e64.v v1, (a0) ; 
CHECK-NEXT: vmv1r.v v28, v21 ; CHECK-NEXT: vsseg8e64.v v22, (a1) ; CHECK-NEXT: vl1re64.v v14, (t5) ; CHECK-NEXT: add t5, t5, a3 ; CHECK-NEXT: add a3, t6, a3 ; CHECK-NEXT: vl1re64.v v22, (t6) ; CHECK-NEXT: vl1re64.v v15, (t5) ; CHECK-NEXT: vl1re64.v v23, (a3) ; CHECK-NEXT: vl1re64.v v12, (t1) ; CHECK-NEXT: vl1re64.v v20, (t2) ; CHECK-NEXT: vl1re64.v v13, (t3) ; CHECK-NEXT: vl1re64.v v21, (t4) ; CHECK-NEXT: vl1re64.v v10, (a5) ; CHECK-NEXT: vl1re64.v v18, (a6) ; CHECK-NEXT: vl1re64.v v11, (a7) ; CHECK-NEXT: vl1re64.v v19, (t0) ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: vl1re64.v v16, (a1) ; CHECK-NEXT: vl1re64.v v9, (a2) ; CHECK-NEXT: vl1re64.v v17, (a4) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ; ; ZVBB-LABEL: vector_interleave_nxv16f64_nxv2f64: ; ZVBB: # %bb.0: ; ZVBB-NEXT: addi sp, sp, -16 ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: sub sp, sp, a0 ; ZVBB-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; ZVBB-NEXT: vmv2r.v v28, v22 ; ZVBB-NEXT: addi a0, sp, 16 ; ZVBB-NEXT: vmv2r.v v26, v18 ; ZVBB-NEXT: csrr a1, vlenb ; ZVBB-NEXT: slli a1, a1, 3 ; ZVBB-NEXT: add a1, sp, a1 ; ZVBB-NEXT: addi a1, a1, 16 ; ZVBB-NEXT: csrr a3, vlenb ; ZVBB-NEXT: add a2, a0, a3 ; ZVBB-NEXT: add a4, a1, a3 ; ZVBB-NEXT: add a5, a2, a3 ; ZVBB-NEXT: vmv1r.v v1, v8 ; ZVBB-NEXT: vmv2r.v v24, v14 ; ZVBB-NEXT: add a6, a4, a3 ; ZVBB-NEXT: vmv2r.v v22, v10 ; ZVBB-NEXT: vmv1r.v v2, v22 ; ZVBB-NEXT: add a7, a5, a3 ; ZVBB-NEXT: vmv1r.v v3, v12 ; ZVBB-NEXT: add t0, a6, a3 ; ZVBB-NEXT: vmv1r.v v4, v24 ; ZVBB-NEXT: add t1, a7, a3 ; ZVBB-NEXT: vmv1r.v v5, v16 ; ZVBB-NEXT: add t2, t0, a3 ; ZVBB-NEXT: vmv1r.v v6, v26 ; ZVBB-NEXT: add t3, t1, a3 ; ZVBB-NEXT: vmv1r.v v7, v20 ; ZVBB-NEXT: add t4, t2, a3 ; ZVBB-NEXT: vmv1r.v v8, v28 ; ZVBB-NEXT: vmv1r.v v22, v9 ; ZVBB-NEXT: add t5, t3, a3 ; ZVBB-NEXT: vmv1r.v v24, v13 ; ZVBB-NEXT: add t6, t4, a3 ; ZVBB-NEXT: vmv1r.v v26, v17 ; ZVBB-NEXT: vsseg8e64.v v1, (a0) ; ZVBB-NEXT: vmv1r.v v28, v21 ; ZVBB-NEXT: vsseg8e64.v v22, (a1) ; ZVBB-NEXT: vl1re64.v v14, (t5) ; ZVBB-NEXT: add t5, t5, a3 ; ZVBB-NEXT: add a3, t6, a3 ; ZVBB-NEXT: vl1re64.v v22, (t6) ; ZVBB-NEXT: vl1re64.v v15, (t5) ; ZVBB-NEXT: vl1re64.v v23, (a3) ; ZVBB-NEXT: vl1re64.v v12, (t1) ; ZVBB-NEXT: vl1re64.v v20, (t2) ; ZVBB-NEXT: vl1re64.v v13, (t3) ; ZVBB-NEXT: vl1re64.v v21, (t4) ; ZVBB-NEXT: vl1re64.v v10, (a5) ; ZVBB-NEXT: vl1re64.v v18, (a6) ; ZVBB-NEXT: vl1re64.v v11, (a7) ; ZVBB-NEXT: vl1re64.v v19, (t0) ; ZVBB-NEXT: vl1re64.v v8, (a0) ; ZVBB-NEXT: vl1re64.v v16, (a1) ; ZVBB-NEXT: vl1re64.v v9, (a2) ; ZVBB-NEXT: vl1re64.v v17, (a4) ; ZVBB-NEXT: csrr a0, vlenb ; ZVBB-NEXT: slli a0, a0, 4 ; ZVBB-NEXT: add sp, sp, a0 ; ZVBB-NEXT: addi sp, sp, 16 ; ZVBB-NEXT: ret %res = call @llvm.vector.interleave8.nxv16f64( %v0, %v1, %v2, %v3, %v4, %v5, %v6, %v7) ret %res }

define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: interleave2_same_const_splat_nxv4i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vmv.v.i v8, 3
; ZVBB-NEXT:    ret
  %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 4 x i16> splat(i16 3), <vscale x 4 x i16> splat(i16 3))
  ret <vscale x 4 x i16> %retval
}
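
; Interleaving two different constant splats cannot be folded into a single
; splat. The plain V lowering widens one splat with vwaddu.vx and fills the
; odd lanes via vwmaccu.vx, ZVBB packs each result pair as (4 << 16) | 3
; using vwsll.vi plus vwaddu.wx, and XRivosVizip zips the two splats directly
; with ri.vzip2a/ri.vzip2b before the final vslideup.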
define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
; V-LABEL: interleave2_diff_const_splat_nxv4i16:
; V:       # %bb.0:
; V-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; V-NEXT:    vmv.v.i v9, 3
; V-NEXT:    li a0, 4
; V-NEXT:    vmv.v.i v10, -1
; V-NEXT:    vwaddu.vx v8, v9, a0
; V-NEXT:    vwmaccu.vx v8, a0, v10
; V-NEXT:    csrr a0, vlenb
; V-NEXT:    srli a0, a0, 2
; V-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; V-NEXT:    vslidedown.vx v9, v8, a0
; V-NEXT:    vslideup.vx v8, v9, a0
; V-NEXT:    ret
;
; ZVBB-LABEL: interleave2_diff_const_splat_nxv4i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vmv.v.i v8, 4
; ZVBB-NEXT:    li a0, 3
; ZVBB-NEXT:    vwsll.vi v9, v8, 16
; ZVBB-NEXT:    vwaddu.wx v8, v9, a0
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vslidedown.vx v9, v8, a0
; ZVBB-NEXT:    vslideup.vx v8, v9, a0
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: interleave2_diff_const_splat_nxv4i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZIP-NEXT:    vmv.v.i v9, 4
; ZIP-NEXT:    vmv.v.i v10, 3
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    ri.vzip2b.vv v11, v10, v9
; ZIP-NEXT:    ri.vzip2a.vv v8, v10, v9
; ZIP-NEXT:    srli a0, a0, 2
; ZIP-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZIP-NEXT:    vslideup.vx v8, v11, a0
; ZIP-NEXT:    ret
  %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 4 x i16> splat(i16 3), <vscale x 4 x i16> splat(i16 4))
  ret <vscale x 4 x i16> %retval
}

define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: interleave2_same_nonconst_splat_nxv4i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vmv.v.x v8, a0
; ZVBB-NEXT:    ret
  %ins = insertelement <vscale x 4 x i16> poison, i16 %a, i32 0
  %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 4 x i16> %splat, <vscale x 4 x i16> %splat)
  ret <vscale x 4 x i16> %retval
}

define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
; V-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
; V:       # %bb.0:
; V-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
; V-NEXT:    vmv.v.x v9, a0
; V-NEXT:    vmv.v.i v10, -1
; V-NEXT:    csrr a0, vlenb
; V-NEXT:    vwaddu.vx v8, v9, a1
; V-NEXT:    vwmaccu.vx v8, a1, v10
; V-NEXT:    srli a0, a0, 2
; V-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; V-NEXT:    vslidedown.vx v9, v8, a0
; V-NEXT:    vslideup.vx v8, v9, a0
; V-NEXT:    ret
;
; ZVBB-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vmv.v.x v8, a1
; ZVBB-NEXT:    csrr a1, vlenb
; ZVBB-NEXT:    vwsll.vi v9, v8, 16
; ZVBB-NEXT:    vwaddu.wx v8, v9, a0
; ZVBB-NEXT:    srli a1, a1, 2
; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vslidedown.vx v9, v8, a1
; ZVBB-NEXT:    vslideup.vx v8, v9, a1
; ZVBB-NEXT:    ret
;
; ZIP-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
; ZIP-NEXT:    vmv.v.x v9, a0
; ZIP-NEXT:    vmv.v.x v10, a1
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    ri.vzip2b.vv v11, v9, v10
; ZIP-NEXT:    ri.vzip2a.vv v8, v9, v10
; ZIP-NEXT:    srli a0, a0, 2
; ZIP-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZIP-NEXT:    vslideup.vx v8, v11, a0
; ZIP-NEXT:    ret
  %ins1 = insertelement <vscale x 4 x i16> poison, i16 %a, i32 0
  %splat1 = shufflevector <vscale x 4 x i16> %ins1, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %ins2 = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat2 = shufflevector <vscale x 4 x i16> %ins2, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 4 x i16> %splat1, <vscale x 4 x i16> %splat2)
  ret <vscale x 4 x i16> %retval
}
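
; interleave4 of four copies of the same constant splat folds to a single
; m2 splat (one vmv.v.i), matching the interleave2 same-splat case above.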
define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: interleave4_same_const_splat_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vmv.v.i v8, 3
; ZVBB-NEXT:    ret
  %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
  ret <vscale x 8 x i16> %retval
}
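
; A minimal sketch of the same-splat case at the widest factor exercised in
; this file (interleave8). This function is illustrative only, not part of the
; autogenerated checks; assuming the interleaveN-of-identical-splats fold
; generalizes, it should lower to a single m4 vmv.v.i rather than a
; vsseg8-based expansion.
define <vscale x 16 x i16> @interleave8_same_const_splat_nxv16i16() {
  ; Hypothetical test: eight copies of the same <vscale x 2 x i16> splat.
  %retval = call <vscale x 16 x i16> @llvm.vector.interleave8.nxv16i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
  ret <vscale x 16 x i16> %retval
}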