; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV32-V128
; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V128,RV64-V128
; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV32-V512
; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+zvl512b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V512,RV64-V512
; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-xrivosvizip -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZIP,RV32-ZIP
; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-xrivosvizip -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZIP,RV64-ZIP

; Test optimizing interleaves to widening arithmetic.

define <4 x i8> @interleave_v2i8(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: interleave_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZIP-LABEL: interleave_v2i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
; ZIP-NEXT:    vmv1r.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <2 x i8> %x, <2 x i8> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i8> %a
}

define <4 x i16> @interleave_v2i16(<2 x i16> %x, <2 x i16> %y) {
; CHECK-LABEL: interleave_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZIP-LABEL: interleave_v2i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
; ZIP-NEXT:    vmv1r.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <2 x i16> %x, <2 x i16> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i16> %a
}

; Vector order switched for coverage.
define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: interleave_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v9, v8
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZIP-LABEL: interleave_v2i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v9, v8
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 2, i32 0, i32 3, i32 1>
  ret <4 x i32> %a
}

; One vXi64 test case to verify that we don't optimize it.
; FIXME: Is there better codegen we can do here?
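; For reference, the widening-arithmetic lowering checked above relies on the
; identity
;   zext(x) + zext(y) + (2^SEW - 1) * zext(y) = zext(x) + 2^SEW * zext(y)
; so vwaddu.vv followed by vwmaccu.vx with scalar -1 leaves y[i] in the high
; half of each 2*SEW-bit element, and the result read back at SEW is the
; interleave x[0], y[0], x[1], y[1], ... There is no e128 element width, so
; i64 elements cannot be widened this way and get the shuffle lowering below.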
define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; V128-LABEL: interleave_v2i64:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vmv.v.i v0, 10
; V128-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; V128-NEXT:    vslideup.vi v12, v10, 1
; V128-NEXT:    vslideup.vi v12, v10, 2
; V128-NEXT:    vmv2r.v v10, v8
; V128-NEXT:    vslideup.vi v10, v8, 1
; V128-NEXT:    vmerge.vvm v8, v10, v12, v0
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v2i64:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
; V512-NEXT:    vslideup.vi v10, v9, 1
; V512-NEXT:    vmv1r.v v11, v8
; V512-NEXT:    vslideup.vi v10, v9, 2
; V512-NEXT:    vmv.v.i v0, 10
; V512-NEXT:    vslideup.vi v11, v8, 1
; V512-NEXT:    vmerge.vvm v8, v11, v10, v0
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v2i64:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; ZIP-NEXT:    vmv1r.v v12, v9
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <2 x i64> %x, <2 x i64> %y, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i64> %a
}

; Vector order switched for coverage.
define <8 x i8> @interleave_v4i8(<4 x i8> %x, <4 x i8> %y) {
; V128-LABEL: interleave_v4i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; V128-NEXT:    vwaddu.vv v10, v9, v8
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v8
; V128-NEXT:    vmv1r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4i8:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e8, mf8, ta, ma
; V512-NEXT:    vwaddu.vv v10, v9, v8
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v8
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v4i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v9, v8
; ZIP-NEXT:    vmv1r.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i8> %x, <4 x i8> %y, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
  ret <8 x i8> %a
}

; Undef elements for coverage
define <8 x i16> @interleave_v4i16(<4 x i16> %x, <4 x i16> %y) {
; V128-LABEL: interleave_v4i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v10, v8, v9
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v9
; V128-NEXT:    vmv1r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v4i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i16> %x, <4 x i16> %y, <8 x i32> <i32 0, i32 4, i32 undef, i32 5, i32 2, i32 undef, i32 3, i32 7>
  ret <8 x i16> %a
}

define <8 x i32> @interleave_v4i32(<4 x i32> %x, <4 x i32> %y) {
; V128-LABEL: interleave_v4i32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vmv1r.v v11, v8
; V128-NEXT:    vwaddu.vv v8, v11, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v4i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; ZIP-NEXT:    vmv1r.v v12, v9
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i32> %x, <4 x i32> %y, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i32> %a
}

; %y should be slid down by 2
define <4 x i32> @interleave_v4i32_offset_2(<4 x i32> %x, <4 x i32> %y) {
; V128-LABEL: interleave_v4i32_offset_2:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT:    vslidedown.vi v10, v9, 2
; V128-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4i32_offset_2:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V512-NEXT:    vslidedown.vi v10, v9, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v4i32_offset_2:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v9, 2
; ZIP-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v8, v10
; ZIP-NEXT:    vmv.v.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 6, i32 1, i32 7>
  ret <4 x i32> %a
}

; %y should be slid down by 1
define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
; V128-LABEL: interleave_v4i32_offset_1:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; V128-NEXT:    vmv.v.i v0, 8
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vslideup.vi v10, v9, 1, v0.t
; V128-NEXT:    vmv.v.i v0, 10
; V128-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; V128-NEXT:    vzext.vf2 v9, v8
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vmerge.vvm v8, v9, v10, v0
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v4i32_offset_1:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, mu
; V512-NEXT:    vmv.v.i v0, 8
; V512-NEXT:    vmv1r.v v10, v9
; V512-NEXT:    vslideup.vi v10, v9, 1, v0.t
; V512-NEXT:    vmv.v.i v0, 10
; V512-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; V512-NEXT:    vzext.vf2 v9, v8
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT:    vmerge.vvm v8, v9, v10, v0
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v4i32_offset_1:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; ZIP-NEXT:    vmv.v.i v0, 8
; ZIP-NEXT:    vmv1r.v v10, v9
; ZIP-NEXT:    vslideup.vi v10, v9, 1, v0.t
; ZIP-NEXT:    vmv.v.i v0, 10
; ZIP-NEXT:    ri.vzip2a.vv v11, v8, v9
; ZIP-NEXT:    vmerge.vvm v8, v11, v10, v0
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 5, i32 1, i32 6>
  ret <4 x i32> %a
}

define <16 x i8> @interleave_v8i8(<8 x i8> %x, <8 x i8> %y) {
; V128-LABEL: interleave_v8i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v10, v8, v9
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v9
; V128-NEXT:    vmv1r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v8i8:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 8, e8, mf8, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v8i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v9
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i8> %x, <8 x i8> %y, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x i8> %a
}

; Vector order switched for coverage.
define <16 x i16> @interleave_v8i16(<8 x i16> %x, <8 x i16> %y) {
; V128-LABEL: interleave_v8i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vmv1r.v v11, v8
; V128-NEXT:    vwaddu.vv v8, v10, v11
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v11
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v8i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 8, e16, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v9, v8
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v8
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v8i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZIP-NEXT:    vmv1r.v v12, v9
; ZIP-NEXT:    ri.vzip2a.vv v10, v12, v8
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i16> %x, <8 x i16> %y, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
  ret <16 x i16> %a
}

define <16 x i32> @interleave_v8i32(<8 x i32> %x, <8 x i32> %y) {
; V128-LABEL: interleave_v8i32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; V128-NEXT:    vmv2r.v v12, v10
; V128-NEXT:    vmv2r.v v14, v8
; V128-NEXT:    vwaddu.vv v8, v14, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v12
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v8i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 8, e32, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v8i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; ZIP-NEXT:    vmv2r.v v16, v10
; ZIP-NEXT:    ri.vzip2a.vv v12, v8, v16
; ZIP-NEXT:    vmv.v.v v8, v12
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i32> %x, <8 x i32> %y, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  ret <16 x i32> %a
}

define <32 x i8> @interleave_v16i8(<16 x i8> %x, <16 x i8> %y) {
; V128-LABEL: interleave_v16i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vmv1r.v v11, v8
; V128-NEXT:    vwaddu.vv v8, v11, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v16i8:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 16, e8, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v16i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ZIP-NEXT:    vmv1r.v v12, v9
; ZIP-NEXT:    li a0, 32
; ZIP-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <16 x i8> %x, <16 x i8> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x i8> %a
}

define <32 x i16> @interleave_v16i16(<16 x i16> %x, <16 x i16> %y) {
; V128-LABEL: interleave_v16i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; V128-NEXT:    vmv2r.v v12, v10
; V128-NEXT:    vmv2r.v v14, v8
; V128-NEXT:    vwaddu.vv v8, v14, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v12
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v16i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 16, e16, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v16i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ZIP-NEXT:    vmv2r.v v16, v10
; ZIP-NEXT:    li a0, 32
; ZIP-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v12, v8, v16
; ZIP-NEXT:    vmv.v.v v8, v12
; ZIP-NEXT:    ret
  %a = shufflevector <16 x i16> %x, <16 x i16> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x i16> %a
}

define <32 x i32> @interleave_v16i32(<16 x i32> %x, <16 x i32> %y) {
; V128-LABEL: interleave_v16i32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT:    vmv4r.v v16, v12
; V128-NEXT:    vmv4r.v v20, v8
; V128-NEXT:    vwaddu.vv v8, v20, v16
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v16
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v16i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
; V512-NEXT:    vmv1r.v v10, v9
; V512-NEXT:    vmv1r.v v11, v8
; V512-NEXT:    vwaddu.vv v8, v11, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v8, a0, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v16i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ZIP-NEXT:    vmv4r.v v24, v12
; ZIP-NEXT:    li a0, 32
; ZIP-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v16, v8, v24
; ZIP-NEXT:    vmv.v.v v8, v16
; ZIP-NEXT:    ret
  %a = shufflevector <16 x i32> %x, <16 x i32> %y, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
  ret <32 x i32> %a
}

define <64 x i8> @interleave_v32i8(<32 x i8> %x, <32 x i8> %y) {
; V128-LABEL: interleave_v32i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; V128-NEXT:    vmv2r.v v12, v10
; V128-NEXT:    vmv2r.v v14, v8
; V128-NEXT:    li a0, 32
; V128-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; V128-NEXT:    vwaddu.vv v8, v14, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v12
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v32i8:
; V512:       # %bb.0:
; V512-NEXT:    li a0, 32
; V512-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v32i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ZIP-NEXT:    vmv2r.v v16, v10
; ZIP-NEXT:    li a0, 64
; ZIP-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v12, v8, v16
; ZIP-NEXT:    vmv.v.v v8, v12
; ZIP-NEXT:    ret
  %a = shufflevector <32 x i8> %x, <32 x i8> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x i8> %a
}

define <64 x i16> @interleave_v32i16(<32 x i16> %x, <32 x i16> %y) {
; V128-LABEL: interleave_v32i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; V128-NEXT:    vmv4r.v v16, v12
; V128-NEXT:    vmv4r.v v20, v8
; V128-NEXT:    li a0, 32
; V128-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; V128-NEXT:    vwaddu.vv v8, v20, v16
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v16
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v32i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; V512-NEXT:    vmv1r.v v10, v9
; V512-NEXT:    vmv1r.v v11, v8
; V512-NEXT:    li a0, 32
; V512-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; V512-NEXT:    vwaddu.vv v8, v11, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v8, a0, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v32i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ZIP-NEXT:    vmv4r.v v24, v12
; ZIP-NEXT:    li a0, 64
; ZIP-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v16, v8, v24
; ZIP-NEXT:    vmv.v.v v8, v16
; ZIP-NEXT:    ret
  %a = shufflevector <32 x i16> %x, <32 x i16> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x i16> %a
}

define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-LABEL: interleave_v32i32:
; V128:       # %bb.0:
; V128-NEXT:    addi sp, sp, -16
; V128-NEXT:    .cfi_def_cfa_offset 16
; V128-NEXT:    csrr a0, vlenb
; V128-NEXT:    slli a0, a0, 3
; V128-NEXT:    sub sp, sp, a0
; V128-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; V128-NEXT:    addi a0, sp, 16
; V128-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; V128-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; V128-NEXT:    vslidedown.vi v24, v16, 16
; V128-NEXT:    li a0, 32
; V128-NEXT:    lui a1, 699051
; V128-NEXT:    vslidedown.vi v0, v8, 16
; V128-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; V128-NEXT:    vzext.vf2 v8, v24
; V128-NEXT:    addi a1, a1, -1366
; V128-NEXT:    vzext.vf2 v24, v0
; V128-NEXT:    vmv.s.x v0, a1
; V128-NEXT:    vsll.vx v8, v8, a0
; V128-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; V128-NEXT:    vmerge.vvm v24, v24, v8, v0
; V128-NEXT:    addi a0, sp, 16
; V128-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; V128-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT:    vwaddu.vv v0, v8, v16
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v0, a0, v16
; V128-NEXT:    vmv8r.v v8, v0
; V128-NEXT:    vmv8r.v v16, v24
; V128-NEXT:    csrr a0, vlenb
; V128-NEXT:    slli a0, a0, 3
; V128-NEXT:    add sp, sp, a0
; V128-NEXT:    .cfi_def_cfa sp, 16
; V128-NEXT:    addi sp, sp, 16
; V128-NEXT:    .cfi_def_cfa_offset 0
; V128-NEXT:    ret
;
; V512-LABEL: interleave_v32i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; V512-NEXT:    vmv2r.v v12, v10
; V512-NEXT:    vmv2r.v v14, v8
; V512-NEXT:    li a0, 32
; V512-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; V512-NEXT:    vwaddu.vv v8, v14, v12
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v8, a0, v12
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_v32i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    addi sp, sp, -16
; ZIP-NEXT:    .cfi_def_cfa_offset 16
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a1, 40
; ZIP-NEXT:    mul a0, a0, a1
; ZIP-NEXT:    sub sp, sp, a0
; ZIP-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    slli a0, a0, 5
; ZIP-NEXT:    add a0, sp, a0
; ZIP-NEXT:    addi a0, a0, 16
; ZIP-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
; ZIP-NEXT:    addi a0, sp, 16
; ZIP-NEXT:    vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
; ZIP-NEXT:    li a0, 32
; ZIP-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; ZIP-NEXT:    vslidedown.vi v16, v8, 16
; ZIP-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v8, v16, v0
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a1, a1, 3
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a1, a1, 5
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; ZIP-NEXT:    vslidedown.vi v16, v16, 16
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    li a2, 24
; ZIP-NEXT:    mul a1, a1, a2
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; ZIP-NEXT:    lui a1, 699051
; ZIP-NEXT:    addi a1, a1, -1366
; ZIP-NEXT:    vmv.s.x v0, a1
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a1, a1, 4
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    li a2, 24
; ZIP-NEXT:    mul a1, a1, a2
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a1, a1, 4
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    csrr a1, vlenb
; ZIP-NEXT:    slli a1, a1, 3
; ZIP-NEXT:    add a1, sp, a1
; ZIP-NEXT:    addi a1, a1, 16
; ZIP-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; ZIP-NEXT:    ri.vzip2a.vv v8, v24, v16, v0.t
; ZIP-NEXT:    vmv.v.v v24, v8
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    slli a0, a0, 5
; ZIP-NEXT:    add a0, sp, a0
; ZIP-NEXT:    addi a0, a0, 16
; ZIP-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    addi a0, sp, 16
; ZIP-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
; ZIP-NEXT:    ri.vzip2a.vv v0, v8, v16
; ZIP-NEXT:    vmv.v.v v8, v0
; ZIP-NEXT:    vmv.v.v v16, v24
; ZIP-NEXT:    csrr a0, vlenb
; ZIP-NEXT:    li a1, 40
; ZIP-NEXT:    mul a0, a0, a1
; ZIP-NEXT:    add sp, sp, a0
; ZIP-NEXT:    .cfi_def_cfa sp, 16
; ZIP-NEXT:    addi sp, sp, 16
; ZIP-NEXT:    .cfi_def_cfa_offset 0
; ZIP-NEXT:    ret
  %a = shufflevector <32 x i32> %x, <32 x i32> %y, <64 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
  ret <64 x i32> %a
}

define <4 x i8> @unary_interleave_v4i8(<4 x i8> %x) {
; V128-LABEL: unary_interleave_v4i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 2
; V128-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4i8:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v4i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v8, 2
; ZIP-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v8, v10
; ZIP-NEXT:    vmv1r.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i8> %a
}

; This shouldn't be interleaved
define <4 x i8> @unary_interleave_v4i8_invalid(<4 x i8> %x) {
; V128-LABEL: unary_interleave_v4i8_invalid:
; V128:       # %bb.0:
; V128-NEXT:    lui a0, 16
; V128-NEXT:    addi a0, a0, 768
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vmv.s.x v10, a0
; V128-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; V128-NEXT:    vrgather.vv v9, v8, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4i8_invalid:
; V512:       # %bb.0:
; V512-NEXT:    lui a0, 16
; V512-NEXT:    addi a0, a0, 768
; V512-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V512-NEXT:    vmv.s.x v10, a0
; V512-NEXT:    vsetivli zero, 4, e8, mf8, ta, ma
; V512-NEXT:    vrgather.vv v9, v8, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v4i8_invalid:
; ZIP:       # %bb.0:
; ZIP-NEXT:    lui a0, 16
; ZIP-NEXT:    addi a0, a0, 768
; ZIP-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZIP-NEXT:    vmv.s.x v10, a0
; ZIP-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZIP-NEXT:    vrgather.vv v9, v8, v10
; ZIP-NEXT:    vmv1r.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 0, i32 3, i32 1, i32 4>
  ret <4 x i8> %a
}

define <4 x i16> @unary_interleave_v4i16(<4 x i16> %x) {
; V128-LABEL: unary_interleave_v4i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 2
; V128-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v4i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v8, 2
; ZIP-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v8, v10
; ZIP-NEXT:    vmv1r.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i16> %a
}

define <4 x i32> @unary_interleave_v4i32(<4 x i32> %x) {
; V128-LABEL: unary_interleave_v4i32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 2
; V128-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v4i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 2
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v4i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v8, 2
; ZIP-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v8, v10
; ZIP-NEXT:    vmv.v.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i32> %a
}

; FIXME: Is there better codegen we can do here?
define <4 x i64> @unary_interleave_v4i64(<4 x i64> %x) {
; V128-LABEL: unary_interleave_v4i64:
; V128:       # %bb.0:
; V128-NEXT:    lui a0, 12304
; V128-NEXT:    addi a0, a0, 512
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vmv.s.x v10, a0
; V128-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; V128-NEXT:    vsext.vf2 v12, v10
; V128-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; V128-NEXT:    vrgatherei16.vv v10, v8, v12
; V128-NEXT:    vmv.v.v v8, v10
; V128-NEXT:    ret
;
; RV32-V512-LABEL: unary_interleave_v4i64:
; RV32-V512:       # %bb.0:
; RV32-V512-NEXT:    lui a0, 12304
; RV32-V512-NEXT:    addi a0, a0, 512
; RV32-V512-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-V512-NEXT:    vmv.s.x v9, a0
; RV32-V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT:    vsext.vf2 v10, v9
; RV32-V512-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-V512-NEXT:    vrgatherei16.vv v9, v8, v10
; RV32-V512-NEXT:    vmv.v.v v8, v9
; RV32-V512-NEXT:    ret
;
; RV64-V512-LABEL: unary_interleave_v4i64:
; RV64-V512:       # %bb.0:
; RV64-V512-NEXT:    lui a0, 12304
; RV64-V512-NEXT:    addi a0, a0, 512
; RV64-V512-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
; RV64-V512-NEXT:    vmv.s.x v9, a0
; RV64-V512-NEXT:    vsext.vf8 v10, v9
; RV64-V512-NEXT:    vrgather.vv v9, v8, v10
; RV64-V512-NEXT:    vmv.v.v v8, v9
; RV64-V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v4i64:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e64, m2, ta, ma
; ZIP-NEXT:    vslidedown.vi v12, v8, 2
; ZIP-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i64> %x, <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i64> %a
}

define <8 x i8> @unary_interleave_v8i8(<8 x i8> %x) {
; V128-LABEL: unary_interleave_v8i8:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 4
; V128-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; V128-NEXT:    vwaddu.vv v9, v8, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v10
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v8i8:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e8, mf8, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 4
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v8i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v8, 4
; ZIP-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v8, v10
; ZIP-NEXT:    vmv1r.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i8> %x, <8 x i8> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i8> %a
}

define <8 x i16> @unary_interleave_v8i16(<8 x i16> %x) {
; V128-LABEL: unary_interleave_v8i16:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
; V128-NEXT:    vslidedown.vi v10, v8, 4
; V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; V128-NEXT:    vwaddu.vv v9, v10, v8
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v9, a0, v8
; V128-NEXT:    vmv1r.v v8, v9
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v8i16:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 4
; V512-NEXT:    vwaddu.vv v9, v10, v8
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v8
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v8i16:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
; ZIP-NEXT:    vslidedown.vi v10, v8, 4
; ZIP-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v9, v10, v8
; ZIP-NEXT:    vmv.v.v v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 4, i32 0, i32 5, i32 1, i32 6, i32 2, i32 7, i32 3>
  ret <8 x i16> %a
}

define <8 x i32> @unary_interleave_v8i32(<8 x i32> %x) {
; V128-LABEL: unary_interleave_v8i32:
; V128:       # %bb.0:
; V128-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
; V128-NEXT:    vslidedown.vi v12, v8, 4
; V128-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; V128-NEXT:    vwaddu.vv v10, v8, v12
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v10, a0, v12
; V128-NEXT:    vmv2r.v v8, v10
; V128-NEXT:    ret
;
; V512-LABEL: unary_interleave_v8i32:
; V512:       # %bb.0:
; V512-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
; V512-NEXT:    vslidedown.vi v10, v8, 4
; V512-NEXT:    vwaddu.vv v9, v8, v10
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v9, a0, v10
; V512-NEXT:    vmv1r.v v8, v9
; V512-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_v8i32:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 4, e32, m2, ta, ma
; ZIP-NEXT:    vslidedown.vi v12, v8, 4
; ZIP-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
  %a = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i32> %a
}

; This interleaves the first 2 elements of a vector in opposite order, with
; undefs for the remaining elements. We used to miscompile this.
define <4 x i8> @unary_interleave_10uu_v4i8(<4 x i8> %x) {
; CHECK-LABEL: unary_interleave_10uu_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    ret
;
; ZIP-LABEL: unary_interleave_10uu_v4i8:
; ZIP:       # %bb.0:
; ZIP-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZIP-NEXT:    vsrl.vi v9, v8, 8
; ZIP-NEXT:    vsll.vi v8, v8, 8
; ZIP-NEXT:    vor.vv v8, v8, v9
; ZIP-NEXT:    ret
  %a = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
  ret <4 x i8> %a
}

define <16 x i16> @interleave_slp(<8 x i16> %v0, <8 x i16> %v1) {
; V128-LABEL: interleave_slp:
; V128:       # %bb.0: # %entry
; V128-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; V128-NEXT:    vmv1r.v v10, v9
; V128-NEXT:    vmv1r.v v11, v8
; V128-NEXT:    vwaddu.vv v8, v11, v10
; V128-NEXT:    li a0, -1
; V128-NEXT:    vwmaccu.vx v8, a0, v10
; V128-NEXT:    ret
;
; V512-LABEL: interleave_slp:
; V512:       # %bb.0: # %entry
; V512-NEXT:    vsetivli zero, 8, e16, mf4, ta, ma
; V512-NEXT:    vwaddu.vv v10, v8, v9
; V512-NEXT:    li a0, -1
; V512-NEXT:    vwmaccu.vx v10, a0, v9
; V512-NEXT:    vmv1r.v v8, v10
; V512-NEXT:    ret
;
; ZIP-LABEL: interleave_slp:
; ZIP:       # %bb.0: # %entry
; ZIP-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZIP-NEXT:    vmv1r.v v12, v9
; ZIP-NEXT:    ri.vzip2a.vv v10, v8, v12
; ZIP-NEXT:    vmv.v.v v8, v10
; ZIP-NEXT:    ret
entry:
  %v2 = shufflevector <8 x i16> %v0, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %v3 = shufflevector <8 x i16> %v1, <8 x i16> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
  %v4 = shufflevector <16 x i16> %v2, <16 x i16> %v3, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  ret <16 x i16> %v4
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32-V128: {{.*}}
; RV32-ZIP: {{.*}}
; RV64-V128: {{.*}}
; RV64-ZIP: {{.*}}