// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
// RUN: %clang_cc1 -triple wasm32-unknown-unknown -o - -emit-llvm %s | FileCheck %s
#include <stdarg.h>
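// Each function below exercises va_arg for a different kind of argument on
// wasm32; the autogenerated assertions record the IR Clang emits for each case.
//
// test_i32: an int is read in place from the current argument pointer, which
// then advances by 4 bytes.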
// CHECK-LABEL: define {{[^@]+}}@test_i32
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[V]], align 4
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[V]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_i32(char *fmt, ...) {
va_list va;
va_start(va, fmt);
int v = va_arg(va, int);
va_end(va);
return v;
}
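// test_i64: a long long must be 8-byte aligned, so the argument pointer is
// rounded up with llvm.ptrmask before the load and then advances by 8 bytes.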
// CHECK-LABEL: define {{[^@]+}}@test_i64
// CHECK-SAME: (ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[V:%.*]] = alloca i64, align 8
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 7
// CHECK-NEXT: [[ARGP_CUR_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[TMP0]], i32 -8)
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR_ALIGNED]], i32 8
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGP_CUR_ALIGNED]], align 8
// CHECK-NEXT: store i64 [[TMP1]], ptr [[V]], align 8
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[V]], align 8
// CHECK-NEXT: ret i64 [[TMP2]]
//
long long test_i64(char *fmt, ...) {
va_list va;
va_start(va, fmt);
long long v = va_arg(va, long long);
va_end(va);
return v;
}
struct S {
int x;
int y;
int z;
};
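// test_struct: a struct is passed indirectly; the va_list slot holds a pointer,
// and the 12-byte struct S is copied into the sret return slot.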
// CHECK-LABEL: define {{[^@]+}}@test_struct
// CHECK-SAME: (ptr dead_on_unwind noalias writable sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_RESULT]], ptr align 4 [[TMP0]], i32 12, i1 false)
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: ret void
//
struct S test_struct(char *fmt, ...) {
va_list va;
va_start(va, fmt);
struct S v = va_arg(va, struct S);
va_end(va);
return v;
}
struct Z {};
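// test_empty_struct: an empty struct consumes no space in the va_list (the
// pointer advances by 0 and a zero-length copy is emitted); the following
// struct S argument is then fetched exactly as in test_struct.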
// CHECK-LABEL: define {{[^@]+}}@test_empty_struct
// CHECK-SAME: (ptr dead_on_unwind noalias writable sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], ptr noundef [[FMT:%.*]], ...) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[FMT_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[VA:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[U:%.*]] = alloca [[STRUCT_Z:%.*]], align 1
// CHECK-NEXT: store ptr [[FMT]], ptr [[FMT_ADDR]], align 4
// CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VA]])
// CHECK-NEXT: [[ARGP_CUR:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR]], i32 0
// CHECK-NEXT: store ptr [[ARGP_NEXT]], ptr [[VA]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 1 [[U]], ptr align 4 [[ARGP_CUR]], i32 0, i1 false)
// CHECK-NEXT: [[ARGP_CUR1:%.*]] = load ptr, ptr [[VA]], align 4
// CHECK-NEXT: [[ARGP_NEXT2:%.*]] = getelementptr inbounds i8, ptr [[ARGP_CUR1]], i32 4
// CHECK-NEXT: store ptr [[ARGP_NEXT2]], ptr [[VA]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGP_CUR1]], align 4
// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[AGG_RESULT]], ptr align 4 [[TMP0]], i32 12, i1 false)
// CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA]])
// CHECK-NEXT: ret void
//
struct S test_empty_struct(char *fmt, ...) {
va_list va;
va_start(va, fmt);
struct Z u = va_arg(va, struct Z);
struct S v = va_arg(va, struct S);
va_end(va);
return v;
}