; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s -check-prefixes=AVX512VL
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+avx512vbmi2 | FileCheck %s -check-prefixes=AVX512VBMI
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s -check-prefixes=ZNVER4
; i512 shifts hidden inside 512-bit vectors.
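;
; A 1-bit shift of the i512 decomposes into per-lane 64-bit funnel shifts.
; Illustrative sketch (lane names a[0..7]/r[i] are not part of the test):
; a left shift by 1 computes
;   r[i] = (a[i] << 1) | (i > 0 ? a[i-1] >> 63 : 0)
; which is the funnel shift fshl(a[i], a[i-1], 1). Without AVX512VBMI2 this
; is emitted as explicit shift+or pairs; with VBMI2 (including znver4, which
; implies it) each pair folds into a single VPSHLDQ concat-shift.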
define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: shl_i512_1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2]
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpor %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT: vpaddq %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlq $63, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512VL-NEXT: vpsrlq $63, %zmm0, %zmm2
; AVX512VL-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT: vpaddq %zmm0, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; AVX512VL-NEXT: retq
;
; AVX512VBMI-LABEL: shl_i512_1:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4
; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VBMI-NEXT: vpshldq $1, %ymm1, %ymm2, %ymm1
; AVX512VBMI-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT: vpshldq $1, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; AVX512VBMI-NEXT: retq
;
; ZNVER4-LABEL: shl_i512_1:
; ZNVER4: # %bb.0:
; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4
; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; ZNVER4-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT: vpshldq $1, %ymm1, %ymm2, %ymm1
; ZNVER4-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; ZNVER4-NEXT: vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; ZNVER4-NEXT: vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT: vpshldq $1, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; ZNVER4-NEXT: retq
%d = bitcast <8 x i64> %a to i512
%s = shl i512 %d, 1
%r = bitcast i512 %s to <8 x i64>
ret <8 x i64> %r
}
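; Logical right shift by 1 uses the mirrored sketch:
;   r[i] = (a[i] >> 1) | (i < 7 ? a[i+1] << 63 : 0)
; i.e. fshr(a[i+1], a[i], 1), with zero bits shifting into the topmost lane.
; VBMI2 targets fuse each shift+or pair into VPSHLDQ with an amount of 63.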
define <8 x i64> @lshr_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: lshr_i512_1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT: vpsllq $63, %xmm3, %xmm4
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $1, %xmm5, %xmm5
; AVX512VL-NEXT: vpor %xmm5, %xmm4, %xmm4
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $1, %xmm3, %xmm3
; AVX512VL-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsllq $63, %ymm1, %ymm1
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VL-NEXT: vpsrlq $1, %ymm2, %ymm2
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512VL-NEXT: vpsrlq $1, %zmm0, %zmm2
; AVX512VL-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VL-NEXT: retq
;
; AVX512VBMI-LABEL: lshr_i512_1:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VBMI-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; AVX512VBMI-NEXT: vextracti32x4 $2, %zmm0, %xmm3
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; AVX512VBMI-NEXT: vpshldq $63, %xmm4, %xmm2, %xmm4
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX512VBMI-NEXT: vpsrlq $1, %xmm2, %xmm2
; AVX512VBMI-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT: vpshldq $63, %ymm3, %ymm1, %ymm1
; AVX512VBMI-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT: vpshldq $63, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VBMI-NEXT: retq
;
; ZNVER4-LABEL: lshr_i512_1:
; ZNVER4: # %bb.0:
; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm3
; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm1
; ZNVER4-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; ZNVER4-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; ZNVER4-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT: vpshldq $63, %xmm4, %xmm2, %xmm4
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; ZNVER4-NEXT: vpshldq $63, %ymm3, %ymm1, %ymm1
; ZNVER4-NEXT: vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT: vpsrlq $1, %xmm2, %xmm2
; ZNVER4-NEXT: vpshldq $63, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; ZNVER4-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; ZNVER4-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; ZNVER4-NEXT: retq
%d = bitcast <8 x i64> %a to i512
%s = lshr i512 %d, 1
%r = bitcast i512 %s to <8 x i64>
ret <8 x i64> %r
}
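; Arithmetic right shift by 1 matches the lshr lowering except that the most
; significant lane uses a sign-preserving VPSRAQ, replicating the sign bit.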
define <8 x i64> @ashr_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: ashr_i512_1:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT: vpsllq $63, %xmm3, %xmm4
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $1, %xmm5, %xmm5
; AVX512VL-NEXT: vpor %xmm5, %xmm4, %xmm4
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX512VL-NEXT: vpsraq $1, %xmm3, %xmm3
; AVX512VL-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsllq $63, %ymm1, %ymm1
; AVX512VL-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VL-NEXT: vpsrlq $1, %ymm2, %ymm2
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512VL-NEXT: vpsrlq $1, %zmm0, %zmm2
; AVX512VL-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VL-NEXT: retq
;
; AVX512VBMI-LABEL: ashr_i512_1:
; AVX512VBMI: # %bb.0:
; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512VBMI-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; AVX512VBMI-NEXT: vextracti32x4 $2, %zmm0, %xmm3
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; AVX512VBMI-NEXT: vpshldq $63, %xmm4, %xmm2, %xmm4
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX512VBMI-NEXT: vpsraq $1, %xmm2, %xmm2
; AVX512VBMI-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT: vpshldq $63, %ymm3, %ymm1, %ymm1
; AVX512VBMI-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT: vpshldq $63, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VBMI-NEXT: retq
;
; ZNVER4-LABEL: ashr_i512_1:
; ZNVER4: # %bb.0:
; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm3
; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm1
; ZNVER4-NEXT: vextracti32x4 $3, %zmm0, %xmm2
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; ZNVER4-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
; ZNVER4-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT: vpshldq $63, %xmm4, %xmm2, %xmm4
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; ZNVER4-NEXT: vpshldq $63, %ymm3, %ymm1, %ymm1
; ZNVER4-NEXT: vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT: vpsraq $1, %xmm2, %xmm2
; ZNVER4-NEXT: vpshldq $63, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; ZNVER4-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; ZNVER4-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; ZNVER4-NEXT: retq
%d = bitcast <8 x i64> %a to i512
%s = ashr i512 %d, 1
%r = bitcast i512 %s to <8 x i64>
ret <8 x i64> %r
}