; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs | FileCheck %s
; RUN: llc -mtriple=aarch64 < %s -verify-machineinstrs -global-isel -global-isel-abort=2 | FileCheck %s
; A dynamically-sized allocation needs a loop which can handle any size at
; runtime. The final iteration of the loop will temporarily put SP below the
; target address, but this doesn't break any of the ABI constraints on the
; stack, and also doesn't probe below the target SP value.
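; A rough sketch of the probing sequence the checks below expect, assuming
; the default 4096-byte probe size:
;   target = sp - round_up(%size, 16)
;   loop:  sp -= 4096
;          if (sp <= target) break   // last iteration may overshoot the target
;          store to [sp]             // probe this page
;   sp = target; load from [sp]      // final probe at the target SP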
define void @dynamic(i64 %size, ptr %out) #0 {
; CHECK-LABEL: dynamic:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: add x9, x0, #15
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: .LBB0_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB0_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_3:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v = alloca i8, i64 %size, align 1
store ptr %v, ptr %out, align 8
ret void
}
; This function has a fixed-size stack slot and a dynamic one. The fixed-size
; slot isn't large enough that we would normally probe it, but we need to do so
; here; otherwise the gap between the CSR save and the first probe of the
; dynamic allocation could exceed the guard size when the size of the dynamic
; allocation is close to the guard size.
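; For instance, assuming a 4096-byte guard: if the 64-byte fixed slot were
; left unprobed, the first loop iteration could move SP 4096 bytes below it
; before probing, leaving up to 64 + 4096 bytes between the CSR store and the
; first probe of the dynamic area; the "str xzr, [sp, #-64]!" below closes
; that gap.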
define void @dynamic_fixed(i64 %size, ptr %out1, ptr %out2) #0 {
; CHECK-LABEL: dynamic_fixed:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str xzr, [sp, #-64]!
; CHECK-NEXT: add x9, x0, #15
; CHECK-NEXT: mov x8, sp
; CHECK-DAG: sub x10, x29, #64
; CHECK-DAG: and x9, x9, #0xfffffffffffffff0
; CHECK-NOT: INVALID_TO_BREAK_UP_CHECK_DAG
; CHECK-DAG: str x10, [x1]
; CHECK-DAG: sub x8, x8, x9
; CHECK-NEXT: .LBB1_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB1_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB1_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB1_1
; CHECK-NEXT: .LBB1_3:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x2]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v1 = alloca i8, i64 64, align 1
store ptr %v1, ptr %out1, align 8
%v2 = alloca i8, i64 %size, align 1
store ptr %v2, ptr %out2, align 8
ret void
}
; Dynamic allocation, with an alignment requirement greater than the alignment
; of SP. Done by ANDing the target SP with a constant to align it down, then
; doing the loop as normal. Note that we also re-align the stack in the prolog,
; which isn't actually needed because the only aligned allocations are dynamic;
; this is done even without stack probing.
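; Roughly, the target SP computed below is
;   (sp - round_up(%size, 16)) & ~63
; i.e. the requested size rounded up to 16 bytes, subtracted from the
; (re-aligned) SP, and the result aligned down to 64 bytes before the usual
; probing loop runs.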
define void @dynamic_align_64(i64 %size, ptr %out) #0 {
; CHECK-LABEL: dynamic_align_64:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 32
; CHECK-NEXT: .cfi_offset w19, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: sub x9, sp, #32
; CHECK-NEXT: and sp, x9, #0xffffffffffffffc0
; CHECK-NEXT: add x9, x0, #15
; CHECK-NEXT: mov x8, sp
; CHECK-DAG: str xzr, [sp]
; CHECK-DAG: and x9, x9, #0xfffffffffffffff0
; CHECK-NOT: INVALID_TO_BREAK_UP_CHECK_DAG
; CHECK-DAG: mov x19, sp
; CHECK-DAG: sub x8, x8, x9
; CHECK-NEXT: and x8, x8, #0xffffffffffffffc0
; CHECK-NEXT: .LBB2_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB2_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB2_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB2_1
; CHECK-NEXT: .LBB2_3:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 32
; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w19
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v = alloca i8, i64 %size, align 64
store ptr %v, ptr %out, align 8
ret void
}
; Dynamic allocation, with an alignment greater than the stack guard size. The
; only difference from the plain dynamic allocation is the constant used for
; aligning the target SP; the loop will probe the whole allocation without
; needing to know about the alignment padding.
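; Two probing loops are expected below: the first covers the prologue
; re-alignment itself, whose padding can approach 8192 bytes and therefore
; exceed the 4096-byte guard, and the second covers the dynamic allocation,
; whose target SP is aligned down to an 8192-byte boundary.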
define void @dynamic_align_8192(i64 %size, ptr %out) #0 {
; CHECK-LABEL: dynamic_align_8192:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 32
; CHECK-NEXT: .cfi_offset w19, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: sub x9, sp, #1, lsl #12 // =4096
; CHECK-NEXT: sub x9, x9, #4064
; CHECK-NEXT: and x9, x9, #0xffffffffffffe000
; CHECK-NEXT: .LBB3_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x9
; CHECK-NEXT: b.le .LBB3_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB3_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB3_1
; CHECK-NEXT: .LBB3_3:
; CHECK-NEXT: mov sp, x9
; CHECK-NEXT: add x9, x0, #15
; CHECK-NEXT: mov x8, sp
; CHECK-DAG: ldr xzr, [sp]
; CHECK-DAG: and x9, x9, #0xfffffffffffffff0
; CHECK-NOT: INVALID_TO_BREAK_UP_CHECK_DAG
; CHECK-DAG: mov x19, sp
; CHECK-DAG: sub x8, x8, x9
; CHECK-NEXT: and x8, x8, #0xffffffffffffe000
; CHECK-NEXT: .LBB3_4: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB3_6
; CHECK-NEXT: // %bb.5: // in Loop: Header=BB3_4 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB3_4
; CHECK-NEXT: .LBB3_6:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 32
; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w19
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v = alloca i8, i64 %size, align 8192
store ptr %v, ptr %out, align 8
ret void
}
; For 64k guard pages, the only difference is the constant subtracted from SP
; in the loop.
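; The "sub sp, sp, #16, lsl #12" below subtracts 16 << 12 = 65536 bytes per
; iteration, matching the "stack-probe-size"="65536" attribute.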
define void @dynamic_64k_guard(i64 %size, ptr %out) #0 "stack-probe-size"="65536" {
; CHECK-LABEL: dynamic_64k_guard:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: add x9, x0, #15
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: .LBB4_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #16, lsl #12 // =65536
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB4_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB4_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB4_1
; CHECK-NEXT: .LBB4_3:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v = alloca i8, i64 %size, align 1
store ptr %v, ptr %out, align 8
ret void
}
; If a function has variable-sized stack objects, then any function calls which
; need to pass arguments on the stack must allocate the stack space for them
; dynamically, to ensure they are at the bottom of the frame. We need to probe
; that space when it is larger than the unprobed space allowed by the ABI (1024
; bytes), so this needs a very large number of arguments.
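; Here the [138 x i64] argument is passed entirely on the stack, needing
; 138 * 8 = 1104 bytes, which is more than the 1024 unprobed bytes allowed,
; so the dynamically-allocated call frame gets its own probe ("str xzr, [sp]")
; before the call.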
define void @no_reserved_call_frame(i64 %n) #0 {
; CHECK-LABEL: no_reserved_call_frame:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: lsl x9, x0, #2
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: add x9, x9, #15
; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
; CHECK-NEXT: sub x0, x8, x9
; CHECK-NEXT: .LBB5_1: // %entry
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x0
; CHECK-NEXT: b.le .LBB5_3
; CHECK-NEXT: // %bb.2: // %entry
; CHECK-NEXT: // in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB5_1
; CHECK-NEXT: .LBB5_3: // %entry
; CHECK-NEXT: mov sp, x0
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: sub sp, sp, #1104
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl callee_stack_args
; CHECK-NEXT: add sp, sp, #1104
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
entry:
%v = alloca i32, i64 %n
call void @callee_stack_args(ptr %v, [138 x i64] undef)
ret void
}
; Same as above but without a variable-sized allocation, so the reserved call
; frame can be folded into the fixed-size allocation in the prologue.
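; The single 1504-byte prologue allocation below folds together the 1104
; bytes of stack-passed call arguments and the 400-byte (100 x i32) alloca;
; as it exceeds 1024 bytes it is still given a probe before the call.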
define void @reserved_call_frame(i64 %n) #0 {
; CHECK-LABEL: reserved_call_frame:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x28, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 32
; CHECK-NEXT: .cfi_offset w28, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: sub sp, sp, #1504
; CHECK-NEXT: add x0, sp, #1104
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl callee_stack_args
; CHECK-NEXT: add sp, sp, #1504
; CHECK-NEXT: .cfi_def_cfa wsp, 32
; CHECK-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w28
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
entry:
%v = alloca i32, i64 100
call void @callee_stack_args(ptr %v, [138 x i64] undef)
ret void
}
declare void @callee_stack_args(ptr, [138 x i64])
; Dynamic allocation of SVE vectors.
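; Each <vscale x 4 x float> occupies one full SVE vector register, so the
; allocation is %size * VL bytes: "rdvl x9, #1" below reads VL in bytes, the
; madd computes %size * VL + 15, and the following AND rounds the total down
; to a 16-byte multiple.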
define void @dynamic_sve(i64 %size, ptr %out) #0 "target-features"="+sve" {
; CHECK-LABEL: dynamic_sve:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: .cfi_def_cfa w29, 32
; CHECK-NEXT: .cfi_offset w19, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: mov x10, #15 // =0xf
; CHECK-DAG: mov x8, sp
; CHECK-DAG: madd x9, x0, x9, x10
; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: .LBB7_1: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
; CHECK-NEXT: cmp sp, x8
; CHECK-NEXT: b.le .LBB7_3
; CHECK-NEXT: // %bb.2: // in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: b .LBB7_1
; CHECK-NEXT: .LBB7_3:
; CHECK-NEXT: mov sp, x8
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: .cfi_def_cfa wsp, 32
; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w19
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: ret
%v = alloca <vscale x 4 x float>, i64 %size, align 16
store ptr %v, ptr %out, align 8
ret void
}
attributes #0 = { uwtable(async) "probe-stack"="inline-asm" "frame-pointer"="none" }