; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1OR2,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX1OR2,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX512BW
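
; With the sign bit forced by the OR with -128, %x is known negative, so
; smin(%x, 0) is known to be %x and the intrinsic folds away completely.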
define i8 @test_i8_knownbits(i8 %a) {
; CHECK-LABEL: test_i8_knownbits:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: orb $-128, %al
; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x = or i8 %a, -128
%1 = call i8 @llvm.smin.i8(i8 %x, i8 0)
ret i8 %1
}
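
; Masking both operands with 15 clears their sign bits, so signed and
; unsigned min agree; SSE2, which lacks pminsb, can select pminub instead.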
define <16 x i8> @test_v16i8_nosignbit(<16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_nosignbit:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pminub %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8_nosignbit:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: pminsb %xmm2, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: test_v16i8_nosignbit:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE42-NEXT: pand %xmm2, %xmm0
; SSE42-NEXT: pand %xmm1, %xmm2
; SSE42-NEXT: pminsb %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_v16i8_nosignbit:
; AVX1: # %bb.0:
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i8_nosignbit:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
%1 = and <16 x i8> %a, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
%2 = and <16 x i8> %b, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
%3 = icmp slt <16 x i8> %1, %2
%4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
ret <16 x i8> %4
}
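
; The inner smin result is already known non-positive, so the outer
; smin with zero is redundant and the pair folds to a single min.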
define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
; SSE2-LABEL: test_v16i8_reassociation:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8_reassociation:
; SSE41: # %bb.0:
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pminsb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: test_v16i8_reassociation:
; SSE42: # %bb.0:
; SSE42-NEXT: pxor %xmm1, %xmm1
; SSE42-NEXT: pminsb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX-LABEL: test_v16i8_reassociation:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer)
%2 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %1, <16 x i8> zeroinitializer)
ret <16 x i8> %2
}
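
; Only the sign bit of the smin result is demanded (via the icmp sge against
; zero), and that bit is just sign(%x) | sign(%y); lowerings with pblendvb
; can therefore collapse the min to a single OR feeding the blend.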
define <16 x i8> @test_v16i8_demandedbits(<16 x i8> %x, <16 x i8> %y, <16 x i8> %a, <16 x i8> %b) {
; SSE2-LABEL: test_v16i8_demandedbits:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pcmpgtb %xmm0, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm4
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8_demandedbits:
; SSE41: # %bb.0:
; SSE41-NEXT: orps %xmm1, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; SSE42-LABEL: test_v16i8_demandedbits:
; SSE42: # %bb.0:
; SSE42-NEXT: orps %xmm1, %xmm0
; SSE42-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE42-NEXT: movdqa %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX1OR2-LABEL: test_v16i8_demandedbits:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1OR2-NEXT: vpblendvb %xmm0, %xmm3, %xmm2, %xmm0
; AVX1OR2-NEXT: retq
;
; AVX512F-LABEL: test_v16i8_demandedbits:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpblendvb %xmm0, %xmm3, %xmm2, %xmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_v16i8_demandedbits:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $xmm3 killed $xmm3 def $zmm3
; AVX512BW-NEXT: # kill: def $xmm2 killed $xmm2 def $zmm2
; AVX512BW-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpcmpnltb %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpblendmb %zmm2, %zmm3, %zmm0 {%k1}
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%smin = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %x, <16 x i8> %y)
%cmp = icmp sge <16 x i8> %smin, zeroinitializer
%res = select <16 x i1> %cmp, <16 x i8> %a, <16 x i8> %b
ret <16 x i8> %res
}

declare i8 @llvm.smin.i8(i8, i8)
declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)