; RUN: llc -march=hexagon < %s | FileCheck %s

; Test for saturating add instructions.

; CHECK-LABEL: test1
; CHECK: v{{.*}}.ub = vadd(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub):sat
; Unsigned saturating add, 128 x i8: one full 128-byte HVX vector (.ub lanes).
define <128 x i8> @test1(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
entry:
  %lhs = load <128 x i8>, <128 x i8>* %a0, align 1
  %rhs = load <128 x i8>, <128 x i8>* %a1, align 1
  %res = call <128 x i8> @llvm.uadd.sat.v128i8(<128 x i8> %lhs, <128 x i8> %rhs)
  ret <128 x i8> %res
}

; CHECK-LABEL: test2
; CHECK: v{{.*}}.b = vadd(v{{[0-9]+}}.b,v{{[0-9]+}}.b):sat
; Signed saturating add, 128 x i8: one full 128-byte HVX vector (.b lanes).
define <128 x i8> @test2(<128 x i8>* %a0, <128 x i8>* %a1) #0 {
entry:
  %lhs = load <128 x i8>, <128 x i8>* %a0, align 1
  %rhs = load <128 x i8>, <128 x i8>* %a1, align 1
  %res = call <128 x i8> @llvm.sadd.sat.v128i8(<128 x i8> %lhs, <128 x i8> %rhs)
  ret <128 x i8> %res
}

; CHECK-LABEL: test3
; CHECK: v{{.*}}.uh = vadd(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh):sat
; Unsigned saturating add, 64 x i16: one full HVX vector (.uh lanes).
define <64 x i16> @test3(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
entry:
  %lhs = load <64 x i16>, <64 x i16>* %a0, align 1
  %rhs = load <64 x i16>, <64 x i16>* %a1, align 1
  %res = call <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16> %lhs, <64 x i16> %rhs)
  ret <64 x i16> %res
}

; CHECK-LABEL: test4
; CHECK: v{{.*}}.h = vadd(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
; Signed saturating add, 64 x i16: one full HVX vector (.h lanes).
define <64 x i16> @test4(<64 x i16>* %a0, <64 x i16>* %a1) #0 {
entry:
  %lhs = load <64 x i16>, <64 x i16>* %a0, align 1
  %rhs = load <64 x i16>, <64 x i16>* %a1, align 1
  %res = call <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16> %lhs, <64 x i16> %rhs)
  ret <64 x i16> %res
}

; CHECK-LABEL: test5
; CHECK: v{{.*}}.uw = vadd(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw):sat
; Unsigned saturating add, 32 x i32: one full HVX vector (.uw lanes).
define <32 x i32> @test5(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
entry:
  %lhs = load <32 x i32>, <32 x i32>* %a0, align 1
  %rhs = load <32 x i32>, <32 x i32>* %a1, align 1
  %res = call <32 x i32> @llvm.uadd.sat.v32i32(<32 x i32> %lhs, <32 x i32> %rhs)
  ret <32 x i32> %res
}

; CHECK-LABEL: test6
; CHECK: v{{.*}}.w = vadd(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
; Signed saturating add, 32 x i32: one full HVX vector (.w lanes).
define <32 x i32> @test6(<32 x i32>* %a0, <32 x i32>* %a1) #0 {
entry:
  %lhs = load <32 x i32>, <32 x i32>* %a0, align 1
  %rhs = load <32 x i32>, <32 x i32>* %a1, align 1
  %res = call <32 x i32> @llvm.sadd.sat.v32i32(<32 x i32> %lhs, <32 x i32> %rhs)
  ret <32 x i32> %res
}

; CHECK-LABEL: test7
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.ub = vadd(v{{[0-9]+}}:{{[0-9]+}}.ub,v{{[0-9]+}}:{{[0-9]+}}.ub):sat
; Unsigned saturating add, 256 x i8: double-width input, expects the
; vector-pair (v1:0-style) form of vadd(...):sat on .ub lanes.
define <256 x i8> @test7(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
entry:
  %lhs = load <256 x i8>, <256 x i8>* %a0, align 1
  %rhs = load <256 x i8>, <256 x i8>* %a1, align 1
  %res = call <256 x i8> @llvm.uadd.sat.v256i8(<256 x i8> %lhs, <256 x i8> %rhs)
  ret <256 x i8> %res
}

; CHECK-LABEL: test8
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.b = vadd(v{{[0-9]+}}:{{[0-9]+}}.b,v{{[0-9]+}}:{{[0-9]+}}.b):sat
; Signed saturating add, 256 x i8: double-width input, vector-pair form,
; .b lanes.
define <256 x i8> @test8(<256 x i8>* %a0, <256 x i8>* %a1) #0 {
entry:
  %lhs = load <256 x i8>, <256 x i8>* %a0, align 1
  %rhs = load <256 x i8>, <256 x i8>* %a1, align 1
  %res = call <256 x i8> @llvm.sadd.sat.v256i8(<256 x i8> %lhs, <256 x i8> %rhs)
  ret <256 x i8> %res
}

; CHECK-LABEL: test9
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vadd(v{{[0-9]+}}:{{[0-9]+}}.uh,v{{[0-9]+}}:{{[0-9]+}}.uh):sat
; Unsigned saturating add, 128 x i16: double-width input, vector-pair form,
; .uh lanes.
define <128 x i16> @test9(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
entry:
  %lhs = load <128 x i16>, <128 x i16>* %a0, align 1
  %rhs = load <128 x i16>, <128 x i16>* %a1, align 1
  %res = call <128 x i16> @llvm.uadd.sat.v128i16(<128 x i16> %lhs, <128 x i16> %rhs)
  ret <128 x i16> %res
}

; CHECK-LABEL: test10
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vadd(v{{[0-9]+}}:{{[0-9]+}}.h,v{{[0-9]+}}:{{[0-9]+}}.h):sat
; Signed saturating add, 128 x i16: double-width input, vector-pair form,
; .h lanes.
define <128 x i16> @test10(<128 x i16>* %a0, <128 x i16>* %a1) #0 {
entry:
  %lhs = load <128 x i16>, <128 x i16>* %a0, align 1
  %rhs = load <128 x i16>, <128 x i16>* %a1, align 1
  %res = call <128 x i16> @llvm.sadd.sat.v128i16(<128 x i16> %lhs, <128 x i16> %rhs)
  ret <128 x i16> %res
}

; CHECK-LABEL: test11
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vadd(v{{[0-9]+}}:{{[0-9]+}}.uw,v{{[0-9]+}}:{{[0-9]+}}.uw):sat
; Unsigned saturating add, 64 x i32: double-width input, vector-pair form,
; .uw lanes.
define <64 x i32> @test11(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
entry:
  %lhs = load <64 x i32>, <64 x i32>* %a0, align 1
  %rhs = load <64 x i32>, <64 x i32>* %a1, align 1
  %res = call <64 x i32> @llvm.uadd.sat.v64i32(<64 x i32> %lhs, <64 x i32> %rhs)
  ret <64 x i32> %res
}

; CHECK-LABEL: test12
; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vadd(v{{[0-9]+}}:{{[0-9]+}}.w,v{{[0-9]+}}:{{[0-9]+}}.w):sat
; Signed saturating add, 64 x i32: double-width input, vector-pair form,
; .w lanes.
define <64 x i32> @test12(<64 x i32>* %a0, <64 x i32>* %a1) #0 {
entry:
  %lhs = load <64 x i32>, <64 x i32>* %a0, align 1
  %rhs = load <64 x i32>, <64 x i32>* %a1, align 1
  %res = call <64 x i32> @llvm.sadd.sat.v64i32(<64 x i32> %lhs, <64 x i32> %rhs)
  ret <64 x i32> %res
}

; CHECK-LABEL: test13
; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}},r{{[0-9]+}}):sat
; Scalar signed saturating add, i32: expects the scalar-register
; add(r,r):sat form.
define i32 @test13(i32 %a0, i32 %a1) #0 {
entry:
  %sum = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 %a1)
  ret i32 %sum
}

; CHECK-LABEL: test14
; CHECK: r{{[0-9]+}}:{{[0-9]+}} = add(r{{[0-9]+}}:{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}}):sat
; Scalar signed saturating add, i64: expects the register-pair
; add(r1:0,r3:2):sat form.
define i64 @test14(i64 %a0, i64 %a1) #0 {
entry:
  %sum = call i64 @llvm.sadd.sat.i64(i64 %a0, i64 %a1)
  ret i64 %sum
}

; Declarations of the saturating-add intrinsics exercised by the tests above.
declare <128 x i8> @llvm.uadd.sat.v128i8(<128 x i8>, <128 x i8>) #1
declare <128 x i8> @llvm.sadd.sat.v128i8(<128 x i8>, <128 x i8>) #1
declare <64 x i16> @llvm.uadd.sat.v64i16(<64 x i16>, <64 x i16>) #1
declare <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16>, <64 x i16>) #1
declare <32 x i32> @llvm.uadd.sat.v32i32(<32 x i32>, <32 x i32>) #1
declare <32 x i32> @llvm.sadd.sat.v32i32(<32 x i32>, <32 x i32>) #1
declare <256 x i8> @llvm.uadd.sat.v256i8(<256 x i8>, <256 x i8>) #1
declare <256 x i8> @llvm.sadd.sat.v256i8(<256 x i8>, <256 x i8>) #1
declare <128 x i16> @llvm.uadd.sat.v128i16(<128 x i16>, <128 x i16>) #1
declare <128 x i16> @llvm.sadd.sat.v128i16(<128 x i16>, <128 x i16>) #1
declare <64 x i32> @llvm.uadd.sat.v64i32(<64 x i32>, <64 x i32>) #1
declare <64 x i32> @llvm.sadd.sat.v64i32(<64 x i32>, <64 x i32>) #1
declare i32 @llvm.sadd.sat.i32(i32, i32)
declare i64 @llvm.sadd.sat.i64(i64, i64)

; #0 selects the Hexagon V73 CPU with HVX enabled at 128-byte vector length,
; which the HVX vadd(...):sat checks above depend on.
attributes #0 = { nounwind "target-cpu"="hexagonv73" "target-features"="+hvxv73,+hvx-length128b" }
attributes #1 = { nounwind readnone speculatable willreturn }