; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -passes=slp-vectorizer,dce -S | FileCheck %s --check-prefixes=CHECK,SSE42
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -passes=slp-vectorizer,dce -S | FileCheck %s --check-prefixes=CHECK,AVX
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
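
; Verify that the SLP vectorizer turns each group of four scalar
; load+extend+store sequences below into a single vector load, a vector
; extend, and a vector store (split to fit the target's register width
; where necessary).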
; int test_sext_4i8_to_4i32(int * restrict A, char * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }
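; All four loads from B are consecutive i8s and all four stores to A are
; consecutive i32s, so the expectation is a single <4 x i8> load, one
; <4 x i32> sext, and one <4 x i32> store.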
define i32 @test_sext_4i8_to_4i32(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-LABEL: @test_sext_4i8_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[B:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i8, ptr %B, align 1
  %conv = sext i8 %0 to i32
  store i32 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i8, ptr %B, i64 1
  %1 = load i8, ptr %arrayidx2, align 1
  %conv3 = sext i8 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 1
  store i32 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i8, ptr %B, i64 2
  %2 = load i8, ptr %arrayidx5, align 1
  %conv6 = sext i8 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 2
  store i32 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i8, ptr %B, i64 3
  %3 = load i8, ptr %arrayidx8, align 1
  %conv9 = sext i8 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, ptr %A, i64 3
  store i32 %conv9, ptr %arrayidx10, align 4
  ret i32 undef
}
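
; Equivalent C source, sketched in the style of the comment on the first
; test (an unsigned source type yields the zero-extension):
; int test_zext_4i16_to_4i32(int * restrict A, unsigned short * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }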
define i32 @test_zext_4i16_to_4i32(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-LABEL: @test_zext_4i16_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[B:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i16, ptr %B, align 1
  %conv = zext i16 %0 to i32
  store i32 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i16, ptr %B, i64 1
  %1 = load i16, ptr %arrayidx2, align 1
  %conv3 = zext i16 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 1
  store i32 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, ptr %B, i64 2
  %2 = load i16, ptr %arrayidx5, align 1
  %conv6 = zext i16 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 2
  store i32 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, ptr %B, i64 3
  %3 = load i16, ptr %arrayidx8, align 1
  %conv9 = zext i16 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, ptr %A, i64 3
  store i32 %conv9, ptr %arrayidx10, align 4
  ret i32 undef
}
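
; Equivalent C source, sketched in the style of the comment on the first test:
; long test_sext_4i16_to_4i64(long * restrict A, short * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }
; The <4 x i64> result is 256 bits wide: SSE4.2 has only 128-bit XMM
; registers, so the SLP vectorizer emits two <2 x i16> -> <2 x i64> halves,
; while AVX's 256-bit YMM registers allow a single <4 x i16> -> <4 x i64>
; operation.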
define i64 @test_sext_4i16_to_4i64(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; SSE42-LABEL: @test_sext_4i16_to_4i64(
; SSE42-NEXT:  entry:
; SSE42-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[B:%.*]], align 1
; SSE42-NEXT:    [[TMP2:%.*]] = sext <2 x i16> [[TMP1]] to <2 x i64>
; SSE42-NEXT:    store <2 x i64> [[TMP2]], ptr [[A:%.*]], align 4
; SSE42-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[B]], i64 2
; SSE42-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 2
; SSE42-NEXT:    [[TMP5:%.*]] = load <2 x i16>, ptr [[ARRAYIDX5]], align 1
; SSE42-NEXT:    [[TMP6:%.*]] = sext <2 x i16> [[TMP5]] to <2 x i64>
; SSE42-NEXT:    store <2 x i64> [[TMP6]], ptr [[ARRAYIDX7]], align 4
; SSE42-NEXT:    ret i64 undef
;
; AVX-LABEL: @test_sext_4i16_to_4i64(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[B:%.*]], align 1
; AVX-NEXT:    [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i64>
; AVX-NEXT:    store <4 x i64> [[TMP2]], ptr [[A:%.*]], align 4
; AVX-NEXT:    ret i64 undef
;
entry:
  %0 = load i16, ptr %B, align 1
  %conv = sext i16 %0 to i64
  store i64 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i16, ptr %B, i64 1
  %1 = load i16, ptr %arrayidx2, align 1
  %conv3 = sext i16 %1 to i64
  %arrayidx4 = getelementptr inbounds i64, ptr %A, i64 1
  store i64 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, ptr %B, i64 2
  %2 = load i16, ptr %arrayidx5, align 1
  %conv6 = sext i16 %2 to i64
  %arrayidx7 = getelementptr inbounds i64, ptr %A, i64 2
  store i64 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, ptr %B, i64 3
  %3 = load i16, ptr %arrayidx8, align 1
  %conv9 = sext i16 %3 to i64
  %arrayidx10 = getelementptr inbounds i64, ptr %A, i64 3
  store i64 %conv9, ptr %arrayidx10, align 4
  ret i64 undef
}