aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/Transforms/SLPVectorizer/X86/split-node-reduce-root.ll
blob: c010ce5586e53dfc1184fdb13fc7f6c771a91b85 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S --passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s

; Exercises SLP vectorization where the reduction root is fed by a "split"
; operand set: four contiguous i64 stack loads (byte offsets 72..96 of the
; alloca) plus two identical scalar `or %0, 1` values. The CHECK lines
; (autogenerated; do not hand-edit) expect a <4 x i64> load and a <2 x i64>
; or to be shuffled together into one <6 x i64> llvm.vector.reduce.add.
define i64 @test(i64 %0) {
; CHECK-LABEL: define i64 @test(
; CHECK-SAME: i64 [[TMP0:%.*]]) {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[TMP1:%.*]] = alloca [20 x i64], align 8
; CHECK-NEXT:    store i64 0, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i64 72
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = or <2 x i64> [[TMP4]], splat (i64 1)
; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i64> [[TMP6]], <4 x i64> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <6 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <6 x i64> [[TMP7]], <6 x i64> [[TMP8]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 7>
; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v6i64(<6 x i64> [[TMP9]])
; CHECK-NEXT:    ret i64 [[TMP10]]
;
bb:
  ; Stack scratch area; only element 0 is explicitly initialized (the loaded
  ; slots at offsets 72..96 are uninitialized — fine for a transform test).
  %1 = alloca [20 x i64], align 8
  store i64 0, ptr %1, align 8
  ; Pointers into the alloca at byte offsets 88, 80, 72 (and 96 below) —
  ; together four contiguous i64 slots, vectorizable as one <4 x i64> load.
  %2 = getelementptr i8, ptr %1, i64 88
  %3 = getelementptr i8, ptr %1, i64 80
  %4 = load i64, ptr %3, align 8
  %5 = getelementptr i8, ptr %1, i64 72
  %6 = load i64, ptr %5, align 8
  ; Two identical scalar ors: the non-load half of the split node; the CHECKs
  ; expect them as a splat + <2 x i64> or.
  %7 = or i64 %0, 1
  %8 = or i64 %0, 1
  ; Scalar add chain summing %7, %8, %6, %4, %13, %12; its final add (%16) is
  ; the reduction root the pass should turn into reduce.add.v6i64.
  %9 = add i64 %7, %8
  %10 = add i64 %6, %9
  %11 = getelementptr i8, ptr %1, i64 96
  %12 = load i64, ptr %11, align 8
  %13 = load i64, ptr %2, align 8
  %14 = add i64 %4, %10
  %15 = add i64 %13, %14
  %16 = add i64 %12, %15
  ret i64 %16
}