; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple riscv64 < %s -o - | FileCheck %s

; Check stack probing ("probe-stack"="inline-asm") for a frame that combines a
; large fixed-size object (the 32 KiB %"buff" alloca) with a scalable RVV spill
; slot: both the fixed-size and the vlenb-sized parts of the frame are probed
; in 4 KiB steps before being used, and the vector spill/reload around the call
; to @bar is addressed relative to the fully allocated stack.

%"buff" = type { [4096 x i64] }

declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
declare void @bar()

define i1 @foo() nounwind "probe-stack"="inline-asm" "target-features"="+v" {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -2032
; CHECK-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s1, 2008(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s2, 2000(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s3, 1992(sp) # 8-byte Folded Spill
; CHECK-NEXT:    lui a0, 7
; CHECK-NEXT:    sub t1, sp, a0
; CHECK-NEXT:    lui t2, 1
; CHECK-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    sub sp, sp, t2
; CHECK-NEXT:    sd zero, 0(sp)
; CHECK-NEXT:    bne sp, t1, .LBB0_1
; CHECK-NEXT:  # %bb.2:
; CHECK-NEXT:    addi sp, sp, -2048
; CHECK-NEXT:    addi sp, sp, -96
; CHECK-NEXT:    csrr t1, vlenb
; CHECK-NEXT:    lui t2, 1
; CHECK-NEXT:  .LBB0_3: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    sub sp, sp, t2
; CHECK-NEXT:    sd zero, 0(sp)
; CHECK-NEXT:    sub t1, t1, t2
; CHECK-NEXT:    bge t1, t2, .LBB0_3
; CHECK-NEXT:  # %bb.4:
; CHECK-NEXT:    sub sp, sp, t1
; CHECK-NEXT:    li a0, 86
; CHECK-NEXT:    addi s0, sp, 48
; CHECK-NEXT:    addi s1, sp, 32
; CHECK-NEXT:    addi s2, sp, 16
; CHECK-NEXT:    lui a1, 353637
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
; CHECK-NEXT:    addi a0, a1, 1622
; CHECK-NEXT:    vse8.v v8, (s0)
; CHECK-NEXT:    vse8.v v8, (s1)
; CHECK-NEXT:    vse8.v v8, (s2)
; CHECK-NEXT:    slli a1, a0, 32
; CHECK-NEXT:    add s3, a0, a1
; CHECK-NEXT:    sd s3, 64(sp)
; CHECK-NEXT:    call bar
; CHECK-NEXT:    lui a0, 8
; CHECK-NEXT:    addi a0, a0, 32
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vse8.v v8, (s0)
; CHECK-NEXT:    vse8.v v8, (s1)
; CHECK-NEXT:    vse8.v v8, (s2)
; CHECK-NEXT:    sd s3, 64(sp)
; CHECK-NEXT:    li a0, 0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    add sp, sp, a1
; CHECK-NEXT:    lui a1, 8
; CHECK-NEXT:    addi a1, a1, -1952
; CHECK-NEXT:    add sp, sp, a1
; CHECK-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s1, 2008(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s2, 2000(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s3, 1992(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 2032
; CHECK-NEXT:    ret
  %1 = alloca %"buff", align 8
  call void @llvm.memset.p0.i64(ptr %1, i8 86, i64 56, i1 false)
  call void @bar()
  call void @llvm.memset.p0.i64(ptr %1, i8 86, i64 56, i1 false)
  ret i1 false
}