; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s
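
; Check that the two scalar loads from @n (at byte offsets 0 and 32) are
; combined into a single @llvm.experimental.vp.strided.load with a 32-byte
; stride, and that the smin/umin chain feeding the final `or` reduction is
; vectorized as well.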

@n = external global [0 x i64]

define i32 @main() {
; CHECK-LABEL: define i32 @main(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i64(ptr align 8 @n, i64 32, <2 x i1> splat (i1 true), i32 2)
; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[TMP0]] to <2 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[TMP1]], <2 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i32> [[TMP2]] to <2 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.umin.v2i64(<2 x i64> [[TMP3]], <2 x i64> splat (i64 17179869184))
; CHECK-NEXT: [[TMP5:%.*]] = trunc <2 x i64> [[TMP4]] to <2 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = add <2 x i32> [[TMP5]], <i32 0, i32 1>
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP6]])
; CHECK-NEXT: ret i32 [[TMP7]]
;
entry:
%0 = load i64, ptr getelementptr (i8, ptr @n, i64 32), align 8
%conv13.i.1 = trunc i64 %0 to i32
%cond.i.1 = tail call i32 @llvm.smin.i32(i32 %conv13.i.1, i32 0)
%conv40.i.1 = sext i32 %cond.i.1 to i64
%cond47.i.1 = tail call i64 @llvm.umin.i64(i64 %conv40.i.1, i64 17179869184)
%1 = trunc i64 %cond47.i.1 to i32
%2 = add i32 %1, 1
%3 = load i64, ptr @n, align 8
%conv13.i.2 = trunc i64 %3 to i32
%cond.i.2 = tail call i32 @llvm.smin.i32(i32 %conv13.i.2, i32 0)
%conv40.i.2 = sext i32 %cond.i.2 to i64
%cond47.i.2 = tail call i64 @llvm.umin.i64(i64 %conv40.i.2, i64 17179869184)
%4 = trunc i64 %cond47.i.2 to i32
%5 = or i32 %2, %4
ret i32 %5
}

declare i32 @llvm.smin.i32(i32, i32)
declare i64 @llvm.umin.i64(i64, i64)