; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -target-abi ilp32e -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I-ILP32E
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -target-abi lp64e -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I-LP64E
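
; Check codegen for a function that mixes a dynamically-sized alloca with a
; 64-byte over-aligned static alloca. The stack realignment forces a frame
; pointer (s0), and the dynamic allocation additionally requires a base
; pointer (s1) so the fixed, over-aligned object stays addressable after sp
; moves. The dynamic size is rounded up to the ABI stack alignment: 16 bytes
; for ilp32/lp64, 4 bytes for ilp32e, and 8 bytes for lp64e.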
declare void @callee(ptr, ptr)
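
; A plausible C equivalent of the IR below (a reconstruction for
; illustration, not necessarily the original source):
;
;   extern void callee(void *, void *);
;   void caller(int n) {
;     char buf[n];          /* dynamically-sized alloca */
;     _Alignas(64) int x;   /* 64-byte over-aligned alloca (C11) */
;     callee(buf, &x);
;   }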
define void @caller(i32 %n) {
; RV32I-LABEL: caller:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -64
; RV32I-NEXT: .cfi_def_cfa_offset 64
; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: addi s0, sp, 64
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -64
; RV32I-NEXT: mv s1, sp
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -64
; RV32I-NEXT: .cfi_def_cfa sp, 64
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV32I-ILP32E-LABEL: caller:
; RV32I-ILP32E: # %bb.0:
; RV32I-ILP32E-NEXT: addi sp, sp, -64
; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 64
; RV32I-ILP32E-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
; RV32I-ILP32E-NEXT: .cfi_offset ra, -4
; RV32I-ILP32E-NEXT: .cfi_offset s0, -8
; RV32I-ILP32E-NEXT: .cfi_offset s1, -12
; RV32I-ILP32E-NEXT: addi s0, sp, 64
; RV32I-ILP32E-NEXT: .cfi_def_cfa s0, 0
; RV32I-ILP32E-NEXT: andi sp, sp, -64
; RV32I-ILP32E-NEXT: mv s1, sp
; RV32I-ILP32E-NEXT: addi a0, a0, 3
; RV32I-ILP32E-NEXT: andi a0, a0, -4
; RV32I-ILP32E-NEXT: sub a0, sp, a0
; RV32I-ILP32E-NEXT: mv sp, a0
; RV32I-ILP32E-NEXT: mv a1, s1
; RV32I-ILP32E-NEXT: call callee
; RV32I-ILP32E-NEXT: addi sp, s0, -64
; RV32I-ILP32E-NEXT: .cfi_def_cfa sp, 64
; RV32I-ILP32E-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
; RV32I-ILP32E-NEXT: .cfi_restore ra
; RV32I-ILP32E-NEXT: .cfi_restore s0
; RV32I-ILP32E-NEXT: .cfi_restore s1
; RV32I-ILP32E-NEXT: addi sp, sp, 64
; RV32I-ILP32E-NEXT: .cfi_def_cfa_offset 0
; RV32I-ILP32E-NEXT: ret
;
; RV64I-LABEL: caller:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -64
; RV64I-NEXT: .cfi_def_cfa_offset 64
; RV64I-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: addi s0, sp, 64
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -64
; RV64I-NEXT: mv s1, sp
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -64
; RV64I-NEXT: .cfi_def_cfa sp, 64
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
;
; RV64I-LP64E-LABEL: caller:
; RV64I-LP64E: # %bb.0:
; RV64I-LP64E-NEXT: addi sp, sp, -64
; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 64
; RV64I-LP64E-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
; RV64I-LP64E-NEXT: .cfi_offset ra, -8
; RV64I-LP64E-NEXT: .cfi_offset s0, -16
; RV64I-LP64E-NEXT: .cfi_offset s1, -24
; RV64I-LP64E-NEXT: addi s0, sp, 64
; RV64I-LP64E-NEXT: .cfi_def_cfa s0, 0
; RV64I-LP64E-NEXT: andi sp, sp, -64
; RV64I-LP64E-NEXT: mv s1, sp
; RV64I-LP64E-NEXT: slli a0, a0, 32
; RV64I-LP64E-NEXT: srli a0, a0, 32
; RV64I-LP64E-NEXT: addi a0, a0, 7
; RV64I-LP64E-NEXT: andi a0, a0, -8
; RV64I-LP64E-NEXT: sub a0, sp, a0
; RV64I-LP64E-NEXT: mv sp, a0
; RV64I-LP64E-NEXT: mv a1, s1
; RV64I-LP64E-NEXT: call callee
; RV64I-LP64E-NEXT: addi sp, s0, -64
; RV64I-LP64E-NEXT: .cfi_def_cfa sp, 64
; RV64I-LP64E-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
; RV64I-LP64E-NEXT: .cfi_restore ra
; RV64I-LP64E-NEXT: .cfi_restore s0
; RV64I-LP64E-NEXT: .cfi_restore s1
; RV64I-LP64E-NEXT: addi sp, sp, 64
; RV64I-LP64E-NEXT: .cfi_def_cfa_offset 0
; RV64I-LP64E-NEXT: ret
%1 = alloca i8, i32 %n
%2 = alloca i32, align 64
call void @callee(ptr %1, ptr %2)
ret void
}