; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

define <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %p
  ret <4 x i8> %v
}

define <4 x i32> @ret_v4i32(ptr %p) {
; CHECK-LABEL: ret_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i32>, ptr %p
  ret <4 x i32> %v
}

define <8 x i32> @ret_v8i32(ptr %p) {
; CHECK-LABEL: ret_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %p
  ret <8 x i32> %v
}

define <16 x i64> @ret_v16i64(ptr %p) {
; CHECK-LABEL: ret_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <16 x i64>, ptr %p
  ret <16 x i64> %v
}

define <8 x i1> @ret_mask_v8i1(ptr %p) {
; CHECK-LABEL: ret_mask_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i1>, ptr %p
  ret <8 x i1> %v
}

define <32 x i1> @ret_mask_v32i1(ptr %p) {
; CHECK-LABEL: ret_mask_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <32 x i1>, ptr %p
  ret <32 x i1> %v
}

; Return the vector via registers v8-v23
define <64 x i32> @ret_split_v64i32(ptr %x) {
; CHECK-LABEL: ret_split_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    ret
  %v = load <64 x i32>, ptr %x
  ret <64 x i32> %v
}

; Return the vector fully via the stack
define <128 x i32> @ret_split_v128i32(ptr %x) {
; CHECK-LABEL: ret_split_v128i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 128
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a2)
; CHECK-NEXT:    addi a2, a1, 256
; CHECK-NEXT:    vle32.v v16, (a2)
; CHECK-NEXT:    addi a2, a1, 384
; CHECK-NEXT:    vle32.v v24, (a1)
; CHECK-NEXT:    addi a1, a0, 384
; CHECK-NEXT:    vle32.v v0, (a2)
; CHECK-NEXT:    addi a2, a0, 256
; CHECK-NEXT:    vse32.v v24, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vse32.v v0, (a1)
; CHECK-NEXT:    vse32.v v16, (a2)
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <128 x i32>, ptr %x
  ret <128 x i32> %v
}

define <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) {
; CHECK-LABEL: ret_v8i8_param_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, <i8 2, i8 2, i8 2, i8 2>
  ret <4 x i8> %r
}

define <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, %w
  ret <4 x i8> %r
}

define <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) {
; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = add <4 x i64> %v, %w
  ret <4 x i64> %r
}

define <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = xor <8 x i1> %v, %w
  ret <8 x i1> %r
}

define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = and <32 x i1> %v, %w
  ret <32 x i1> %r
}

define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; CHECK-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vadd.vv v8, v8, v24
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = add <32 x i32> %x, %y
  %s = add <32 x i32> %r, %z
  %head = insertelement <32 x i32> poison, i32 %w, i32 0
  %splat = shufflevector <32 x i32> %head, <32 x i32> poison, <32 x i32> zeroinitializer
  %t = add <32 x i32> %s, %splat
  ret <32 x i32> %t
}

declare <32 x i32> @ext2(<32 x i32>, <32 x i32>, i32, i32)
declare <32 x i32> @ext3(<32 x i32>, <32 x i32>, <32 x i32>, i32, i32)

define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, i32 %w) {
; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    li a1, 2
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    call ext2
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %t = call <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
  ret <32 x i32> %t
}

define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -256
; CHECK-NEXT:    .cfi_def_cfa_offset 256
; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    addi s0, sp, 256
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    andi sp, sp, -128
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    mv a3, sp
; CHECK-NEXT:    mv a0, sp
; CHECK-NEXT:    li a2, 42
; CHECK-NEXT:    vse32.v v8, (a3)
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    call ext3
; CHECK-NEXT:    addi sp, s0, -256
; CHECK-NEXT:    .cfi_def_cfa sp, 256
; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 256
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
  ret <32 x i32> %t
}

; Test various configurations of split vector types where the values are split
; across both registers and the stack.
;   a0+64 z[16:31]
;   v20m2 y[24:31], v22m2 z[0:7], a1+0 z[8:15], a1+32 z[16:23],
;   a1+64 z[24:31]
;   v16 y[12:15], v17 y[16:19], v18 y[20:23], v19 y[24:27],
;   v20 y[28:31], v21 z[0:3], v22 z[4:7], v23 z[8:11],
;   a1+0 z[12:15], a1+16 z[16:19], a1+32 z[20:23], a1+48 z[24:27],
;   a1+64 z[28:31]
define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>, <32 x i32> %y, <32 x i32> %z) {
; CHECK-LABEL: split_vector_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v16, v8
; CHECK-NEXT:    ret
  %v0 = add <32 x i32> %y, %z
  ret <32 x i32> %v0
}

define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; CHECK-LABEL: call_split_vector_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -256
; CHECK-NEXT:    .cfi_def_cfa_offset 256
; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    addi s0, sp, 256
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    andi sp, sp, -128
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    mv a1, sp
; CHECK-NEXT:    mv a0, sp
; CHECK-NEXT:    vse32.v v16, (a1)
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    call split_vector_args
; CHECK-NEXT:    addi sp, s0, -256
; CHECK-NEXT:    .cfi_def_cfa sp, 256
; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 256
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %pa
  %b = load <32 x i32>, ptr %pb
  %r = call <32 x i32> @split_vector_args(<2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <32 x i32> %b, <32 x i32> %b)
  ret <32 x i32> %r
}

; A rather pathological test case in which we exhaust all vector registers and
; all scalar registers, forcing %z and %8 to go through the stack.
define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
; CHECK-LABEL: vector_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (sp)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
  %s = add <32 x i32> %x, %z
  ret <32 x i32> %s
}

; Calling the function above. Ensure we pass the arguments correctly.
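; In the call below, the first two <32 x i32> arguments travel in v8m8 and
; v16m8; the third no longer fits in registers, so the caller writes it to the
; 128-byte outgoing slot at sp+0 (vse32.v v8, (sp)) and places the trailing
; i32 8 just past it at sp+128 (sd t0, 128(sp)).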
define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
; CHECK-LABEL: pass_vector_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -144
; CHECK-NEXT:    .cfi_def_cfa_offset 144
; CHECK-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    li t0, 8
; CHECK-NEXT:    li a1, 1
; CHECK-NEXT:    li a2, 2
; CHECK-NEXT:    li a3, 3
; CHECK-NEXT:    li a4, 4
; CHECK-NEXT:    li a5, 5
; CHECK-NEXT:    li a6, 6
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vse32.v v8, (sp)
; CHECK-NEXT:    li a7, 7
; CHECK-NEXT:    sd t0, 128(sp)
; CHECK-NEXT:    li a0, 0
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    call vector_arg_via_stack
; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 144
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %s = call <32 x i32> @vector_arg_via_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
  ret <32 x i32> %s
}

; Another pathological case but where a small mask vector must be passed on the
; stack.
define <4 x i1> @vector_mask_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8, <4 x i1> %9, <4 x i1> %10) {
; CHECK-LABEL: vector_mask_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, sp, 136
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  ret <4 x i1> %10
}

; Calling the function above. Ensure we pass the mask arguments correctly. We
; legalize stores of small masks such that the value is at least byte-sized.
define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; CHECK-LABEL: pass_vector_mask_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -160
; CHECK-NEXT:    .cfi_def_cfa_offset 160
; CHECK-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    li a1, 8
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v17, 0
; CHECK-NEXT:    addi a2, sp, 136
; CHECK-NEXT:    li a5, 5
; CHECK-NEXT:    li a6, 6
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    sd a1, 128(sp)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vse32.v v8, (sp)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v17, v16
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v16, v17, 0
; CHECK-NEXT:    li a7, 7
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    li a0, 0
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    li a3, 0
; CHECK-NEXT:    li a4, 0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    call vector_mask_arg_via_stack
; CHECK-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 160
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %r = call <4 x i1> @vector_mask_arg_via_stack(i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8, <4 x i1> %v, <4 x i1> %v)
  ret <4 x i1> %r
}
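; Note on pass_vector_mask_arg_via_stack above: the <4 x i1> stack argument is
; first widened to a byte-sized <8 x i1> value (the vmerge.vim/vmv.v.v/vmsne.vi
; sequence in the checks) so that the vsm.v store at sp+136 writes a full byte,
; which the callee reads back with vlm.v at the same offset.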