; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s
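
; The memory accesses in @test are indexed by zext(trunc(iv) * %x) and
; zext(trunc(iv + 1) * %x). These are only affine (and thus vectorizable)
; under the SCEV predicate %x == 1, which is emitted as the runtime identity
; check in vector.scevcheck: if %x != 1, the vector loop is bypassed and only
; the scalar loop runs.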
define void @test(ptr %A, i32 %x) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: ptr [[A:%.*]], i32 [[X:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
; CHECK: [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[X]], 1
; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[OFFSET_IDX]], 1
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP5]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
; CHECK-NEXT: store <4 x float> [[WIDE_LOAD]], ptr [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[T_IV_NEXT:%.*]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT: [[MUL_IV_NEXT:%.*]] = mul i32 [[T_IV_NEXT]], [[X]]
; CHECK-NEXT: [[IDX_1:%.*]] = zext i32 [[MUL_IV_NEXT]] to i64
; CHECK-NEXT: [[ARRAYIDX1215:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IDX_1]]
; CHECK-NEXT: [[LV:%.*]] = load float, ptr [[ARRAYIDX1215]], align 4
; CHECK-NEXT: [[T_IV:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT: [[MUL_IV:%.*]] = mul i32 [[T_IV]], [[X]]
; CHECK-NEXT: [[IDX_2:%.*]] = zext i32 [[MUL_IV]] to i64
; CHECK-NEXT: [[ARRAYIDX1209:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IDX_2]]
; CHECK-NEXT: store float [[LV]], ptr [[ARRAYIDX1209]], align 4
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
%iv.next = add nuw nsw i64 %iv, 1
%t.iv.next = trunc i64 %iv.next to i32
%mul.iv.next = mul i32 %t.iv.next, %x
%idx.1 = zext i32 %mul.iv.next to i64
%arrayidx1215 = getelementptr inbounds float, ptr %A, i64 %idx.1
%lv = load float, ptr %arrayidx1215, align 4
%t.iv = trunc i64 %iv to i32
%mul.iv = mul i32 %t.iv, %x
%idx.2 = zext i32 %mul.iv to i64
%arrayidx1209 = getelementptr inbounds float, ptr %A, i64 %idx.2
store float %lv, ptr %arrayidx1209, align 4
%ec = icmp eq i64 %iv.next, 1000
br i1 %ec, label %exit, label %loop
exit:
ret void
}

; !llvm.loop.unroll.runtime.disable metadata should be added to the scalar
; loop, as no runtime checks are needed (or they can be proven false).
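; In @diff_memcheck_known_false_for_vf_4, %add.ptr11 = %B + (%A - %B), so it
; points at the same address as %A and the distance between the two pointer
; inductions is a known constant; the only guard emitted is the minimum
; iteration count check.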
define void @diff_memcheck_known_false_for_vf_4(ptr %B, ptr %A, ptr %end) {
; CHECK-LABEL: define void @diff_memcheck_known_false_for_vf_4(
; CHECK-SAME: ptr [[B:%.*]], ptr [[A:%.*]], ptr [[END:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64
; CHECK-NEXT: [[A_INT:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[B_CAST:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[PTR_SUB:%.*]] = sub i64 [[A_INT]], [[B_CAST]]
; CHECK-NEXT: [[ADD_PTR11:%.*]] = getelementptr i8, ptr [[B]], i64 [[PTR_SUB]]
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[A_INT]], [[END1]]
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], -8
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[N_VEC]], -8
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[ADD_PTR11]], i64 [[TMP5]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], -8
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[NEXT_GEP]], i32 0
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP7]], i32 -3
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP8]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP4]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[ADD_PTR11]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV_1:%.*]] = phi ptr [ [[IV_1_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_2:%.*]] = phi ptr [ [[IV_2_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = getelementptr nusw i8, ptr [[IV_2]], i64 -8
; CHECK-NEXT: [[IV_1_NEXT]] = getelementptr i8, ptr [[IV_1]], i64 -8
; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[IV_2_NEXT]], align 8
; CHECK-NEXT: store i64 0, ptr [[IV_1]], align 8
; CHECK-NEXT: [[CMP_NOT_I_I_I_I:%.*]] = icmp eq ptr [[END]], [[IV_2]]
; CHECK-NEXT: br i1 [[CMP_NOT_I_I_I_I]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
%A.int = ptrtoint ptr %A to i64
%B.cast = ptrtoint ptr %B to i64
%ptr.sub = sub i64 %A.int, %B.cast
%add.ptr11 = getelementptr i8, ptr %B, i64 %ptr.sub
br label %loop
loop:
%iv.1 = phi ptr [ %iv.1.next, %loop ], [ %A, %entry ]
%iv.2 = phi ptr [ %iv.2.next, %loop ], [ %add.ptr11, %entry ]
%iv.2.next = getelementptr nusw i8, ptr %iv.2, i64 -8
%iv.1.next = getelementptr i8, ptr %iv.1, i64 -8
%2 = load i64, ptr %iv.2.next, align 8
store i64 0, ptr %iv.1, align 8
%cmp.not.i.i.i.i = icmp eq ptr %end, %iv.2
br i1 %cmp.not.i.i.i.i, label %exit, label %loop
exit:
ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
;.