; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=RV32,RV32I
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -mattr=+zba < %s \
; RUN:   | FileCheck %s -check-prefixes=RV32,RV32ZBA
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=RV64,RV64I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -mattr=+zba < %s \
; RUN:   | FileCheck %s -check-prefixes=RV64,RV64ZBA

declare void @callee(ptr)

define void @frame_16b() {
; RV32-LABEL: frame_16b:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    call callee
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_16b:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    call callee
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  call void @callee(ptr null)
  ret void
}

define void @frame_1024b() {
; RV32-LABEL: frame_1024b:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -1024
; RV32-NEXT:    .cfi_def_cfa_offset 1024
; RV32-NEXT:    sw ra, 1020(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    lw ra, 1020(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 1024
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_1024b:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -1024
; RV64-NEXT:    .cfi_def_cfa_offset 1024
; RV64-NEXT:    sd ra, 1016(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    ld ra, 1016(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 1024
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [1008 x i8]
  call void @callee(ptr %a)
  ret void
}

define void @frame_2048b() {
; RV32-LABEL: frame_2048b:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 2048
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_2048b:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 2048
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [2032 x i8]
  call void @callee(ptr %a)
  ret void
}

define void @frame_4096b() {
; RV32-LABEL: frame_4096b:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    addi sp, sp, -2048
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 4096
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_4096b:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    addi sp, sp, -2048
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 4096
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    addi sp, sp, 32
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [4080 x i8]
  call void @callee(ptr %a)
  ret void
}

;; 2^12-16+2032
define void @frame_4kb() {
; RV32-LABEL: frame_4kb:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    lui a0, 1
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 6128
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    lui a0, 1
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_4kb:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    lui a0, 1
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 6128
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    lui a0, 1
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [6112 x i8]
  call void @callee(ptr %a)
  ret void
}

define void @frame_4kb_offset_128() {
; RV32I-LABEL: frame_4kb_offset_128:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -2032
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT:    .cfi_offset ra, -4
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, 128
; RV32I-NEXT:    sub sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 6256
; RV32I-NEXT:    addi a0, sp, 12
; RV32I-NEXT:    call callee
; RV32I-NEXT:    lui a0, 1
; RV32I-NEXT:    addi a0, a0, 128
; RV32I-NEXT:    add sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT:    .cfi_restore ra
; RV32I-NEXT:    addi sp, sp, 2032
; RV32I-NEXT:    .cfi_def_cfa_offset 0
; RV32I-NEXT:    ret
;
; RV32ZBA-LABEL: frame_4kb_offset_128:
; RV32ZBA:       # %bb.0:
; RV32ZBA-NEXT:    addi sp, sp, -2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT:    .cfi_offset ra, -4
; RV32ZBA-NEXT:    li a0, -528
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 6256
; RV32ZBA-NEXT:    addi a0, sp, 12
; RV32ZBA-NEXT:    call callee
; RV32ZBA-NEXT:    li a0, 528
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT:    .cfi_restore ra
; RV32ZBA-NEXT:    addi sp, sp, 2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV32ZBA-NEXT:    ret
;
; RV64I-LABEL: frame_4kb_offset_128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -2032
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64I-NEXT:    .cfi_offset ra, -8
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, 128
; RV64I-NEXT:    sub sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 6256
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee
; RV64I-NEXT:    lui a0, 1
; RV64I-NEXT:    addi a0, a0, 128
; RV64I-NEXT:    add sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT:    .cfi_restore ra
; RV64I-NEXT:    addi sp, sp, 2032
; RV64I-NEXT:    .cfi_def_cfa_offset 0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: frame_4kb_offset_128:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    addi sp, sp, -2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64ZBA-NEXT:    .cfi_offset ra, -8
; RV64ZBA-NEXT:    li a0, -528
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 6256
; RV64ZBA-NEXT:    addi a0, sp, 8
; RV64ZBA-NEXT:    call callee
; RV64ZBA-NEXT:    li a0, 528
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT:    .cfi_restore ra
; RV64ZBA-NEXT:    addi sp, sp, 2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV64ZBA-NEXT:    ret
  %a = alloca [6240 x i8]
  call void @callee(ptr %a)
  ret void
}

;; 2^13-16+2032
define void @frame_8kb() {
; RV32-LABEL: frame_8kb:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    lui a0, 2
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 10224
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    lui a0, 2
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_8kb:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    lui a0, 2
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 10224
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    lui a0, 2
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [10208 x i8]
  call void @callee(ptr %a)
  ret void
}

define void @frame_8kb_offset_128() {
; RV32I-LABEL: frame_8kb_offset_128:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -2032
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT:    .cfi_offset ra, -4
; RV32I-NEXT:    lui a0, 2
; RV32I-NEXT:    addi a0, a0, 128
; RV32I-NEXT:    sub sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 10352
; RV32I-NEXT:    addi a0, sp, 12
; RV32I-NEXT:    call callee
; RV32I-NEXT:    lui a0, 2
; RV32I-NEXT:    addi a0, a0, 128
; RV32I-NEXT:    add sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT:    .cfi_restore ra
; RV32I-NEXT:    addi sp, sp, 2032
; RV32I-NEXT:    .cfi_def_cfa_offset 0
; RV32I-NEXT:    ret
;
; RV32ZBA-LABEL: frame_8kb_offset_128:
; RV32ZBA:       # %bb.0:
; RV32ZBA-NEXT:    addi sp, sp, -2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT:    .cfi_offset ra, -4
; RV32ZBA-NEXT:    li a0, -1040
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 10352
; RV32ZBA-NEXT:    addi a0, sp, 12
; RV32ZBA-NEXT:    call callee
; RV32ZBA-NEXT:    li a0, 1040
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT:    .cfi_restore ra
; RV32ZBA-NEXT:    addi sp, sp, 2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV32ZBA-NEXT:    ret
;
; RV64I-LABEL: frame_8kb_offset_128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -2032
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64I-NEXT:    .cfi_offset ra, -8
; RV64I-NEXT:    lui a0, 2
; RV64I-NEXT:    addi a0, a0, 128
; RV64I-NEXT:    sub sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 10352
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee
; RV64I-NEXT:    lui a0, 2
; RV64I-NEXT:    addi a0, a0, 128
; RV64I-NEXT:    add sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT:    .cfi_restore ra
; RV64I-NEXT:    addi sp, sp, 2032
; RV64I-NEXT:    .cfi_def_cfa_offset 0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: frame_8kb_offset_128:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    addi sp, sp, -2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64ZBA-NEXT:    .cfi_offset ra, -8
; RV64ZBA-NEXT:    li a0, -1040
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 10352
; RV64ZBA-NEXT:    addi a0, sp, 8
; RV64ZBA-NEXT:    call callee
; RV64ZBA-NEXT:    li a0, 1040
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT:    .cfi_restore ra
; RV64ZBA-NEXT:    addi sp, sp, 2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV64ZBA-NEXT:    ret
  %a = alloca [10336 x i8]
  call void @callee(ptr %a)
  ret void
}

define void @frame_16kb_minus_80() {
; RV32I-LABEL: frame_16kb_minus_80:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -2032
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT:    .cfi_offset ra, -4
; RV32I-NEXT:    lui a0, 4
; RV32I-NEXT:    addi a0, a0, -80
; RV32I-NEXT:    sub sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 18336
; RV32I-NEXT:    addi a0, sp, 12
; RV32I-NEXT:    call callee
; RV32I-NEXT:    lui a0, 4
; RV32I-NEXT:    addi a0, a0, -80
; RV32I-NEXT:    add sp, sp, a0
; RV32I-NEXT:    .cfi_def_cfa_offset 2032
; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT:    .cfi_restore ra
; RV32I-NEXT:    addi sp, sp, 2032
; RV32I-NEXT:    .cfi_def_cfa_offset 0
; RV32I-NEXT:    ret
;
; RV32ZBA-LABEL: frame_16kb_minus_80:
; RV32ZBA:       # %bb.0:
; RV32ZBA-NEXT:    addi sp, sp, -2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32ZBA-NEXT:    .cfi_offset ra, -4
; RV32ZBA-NEXT:    li a0, -2038
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 18336
; RV32ZBA-NEXT:    addi a0, sp, 12
; RV32ZBA-NEXT:    call callee
; RV32ZBA-NEXT:    li a0, 2038
; RV32ZBA-NEXT:    sh3add sp, a0, sp
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV32ZBA-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32ZBA-NEXT:    .cfi_restore ra
; RV32ZBA-NEXT:    addi sp, sp, 2032
; RV32ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV32ZBA-NEXT:    ret
;
; RV64I-LABEL: frame_16kb_minus_80:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -2032
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64I-NEXT:    .cfi_offset ra, -8
; RV64I-NEXT:    lui a0, 4
; RV64I-NEXT:    addi a0, a0, -80
; RV64I-NEXT:    sub sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 18336
; RV64I-NEXT:    addi a0, sp, 8
; RV64I-NEXT:    call callee
; RV64I-NEXT:    lui a0, 4
; RV64I-NEXT:    addi a0, a0, -80
; RV64I-NEXT:    add sp, sp, a0
; RV64I-NEXT:    .cfi_def_cfa_offset 2032
; RV64I-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64I-NEXT:    .cfi_restore ra
; RV64I-NEXT:    addi sp, sp, 2032
; RV64I-NEXT:    .cfi_def_cfa_offset 0
; RV64I-NEXT:    ret
;
; RV64ZBA-LABEL: frame_16kb_minus_80:
; RV64ZBA:       # %bb.0:
; RV64ZBA-NEXT:    addi sp, sp, -2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64ZBA-NEXT:    .cfi_offset ra, -8
; RV64ZBA-NEXT:    li a0, -2038
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 18336
; RV64ZBA-NEXT:    addi a0, sp, 8
; RV64ZBA-NEXT:    call callee
; RV64ZBA-NEXT:    li a0, 2038
; RV64ZBA-NEXT:    sh3add sp, a0, sp
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 2032
; RV64ZBA-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64ZBA-NEXT:    .cfi_restore ra
; RV64ZBA-NEXT:    addi sp, sp, 2032
; RV64ZBA-NEXT:    .cfi_def_cfa_offset 0
; RV64ZBA-NEXT:    ret
  %a = alloca [18320 x i8]
  call void @callee(ptr %a)
  ret void
}

;; 2^14-16+2032
define void @frame_16kb() {
; RV32-LABEL: frame_16kb:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    lui a0, 4
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 18416
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    lui a0, 4
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_16kb:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    lui a0, 4
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 18416
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    lui a0, 4
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [18400 x i8]
  call void @callee(ptr %a)
  ret void
}

;; 2^15-16+2032
define void @frame_32kb() {
; RV32-LABEL: frame_32kb:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -2032
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 34800
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    call callee
; RV32-NEXT:    lui a0, 8
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 2032
; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    addi sp, sp, 2032
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: frame_32kb:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -2032
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 34800
; RV64-NEXT:    addi a0, sp, 8
; RV64-NEXT:    call callee
; RV64-NEXT:    lui a0, 8
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 2032
; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    addi sp, sp, 2032
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %a = alloca [34784 x i8]
  call void @callee(ptr %a)
  ret void
}