Diffstat (limited to 'llvm/test/Transforms')
11 files changed, 1560 insertions, 438 deletions
diff --git a/llvm/test/Transforms/DeadStoreElimination/zeroed-missing.ll b/llvm/test/Transforms/DeadStoreElimination/zeroed-missing.ll index e390d4b..303afc20 100644 --- a/llvm/test/Transforms/DeadStoreElimination/zeroed-missing.ll +++ b/llvm/test/Transforms/DeadStoreElimination/zeroed-missing.ll @@ -12,6 +12,6 @@ define ptr @undeclared_customalloc(i64 %size, i64 %align) { ret ptr %call } -declare ptr @customalloc2(i64, i64) allockind("alloc") "alloc-family"="customalloc2" "alloc-variant-zeroed"="customalloc2_zeroed" +declare ptr @customalloc2(i64, i64) allockind("alloc,uninitialized") "alloc-family"="customalloc2" "alloc-variant-zeroed"="customalloc2_zeroed" ; CHECK-DAG: declare ptr @customalloc2_zeroed(i64, i64) #[[CA2ATTR:[0-9]+]] ; CHECK-DAG: attributes #[[CA2ATTR]] = { allockind("alloc,zeroed") "alloc-family"="customalloc2" } diff --git a/llvm/test/Transforms/LoopInterchange/force-interchange.ll b/llvm/test/Transforms/LoopInterchange/force-interchange.ll new file mode 100644 index 0000000..c33ecdf --- /dev/null +++ b/llvm/test/Transforms/LoopInterchange/force-interchange.ll @@ -0,0 +1,43 @@ +; RUN: opt < %s -passes=loop-interchange -pass-remarks-output=%t -disable-output -loop-interchange-profitabilities=ignore -S +; RUN: FileCheck --input-file=%t %s + +; There should be no reason to interchange this, unless it is forced. +; +; for (int i = 0; i<1024; i++) +; for (int j = 0; j<1024; j++) +; A[i][j] = 42; +; +; CHECK: --- !Passed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: f +; CHECK-NEXT: Args: +; CHECK-NEXT: - String: Loop interchanged with enclosing loop. +; CHECK-NEXT: ... + +@A = dso_local local_unnamed_addr global [1024 x [1024 x i32]] zeroinitializer, align 4 + +define dso_local void @f() local_unnamed_addr #0 { +entry: + br label %outer.header + +outer.header: + %i = phi i64 [ 0, %entry ], [ %i.next, %inner.header ] + br label %inner.body + +inner.header: + %i.next = add nuw nsw i64 %i, 1 + %exitcond20.not = icmp eq i64 %i.next, 1024 + br i1 %exitcond20.not, label %exit, label %outer.header + +inner.body: + %j = phi i64 [ 0, %outer.header ], [ %j.next, %inner.body ] + %arrayidx6 = getelementptr inbounds nuw [1024 x [1024 x i32]], ptr @A, i64 0, i64 %i, i64 %j + store i32 42, ptr %arrayidx6, align 4 + %j.next = add nuw nsw i64 %j, 1 + %exitcond.not = icmp eq i64 %j.next, 1024 + br i1 %exitcond.not, label %inner.header, label %inner.body + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopInterchange/fp-reductions.ll b/llvm/test/Transforms/LoopInterchange/fp-reductions.ll new file mode 100644 index 0000000..0703a7b --- /dev/null +++ b/llvm/test/Transforms/LoopInterchange/fp-reductions.ll @@ -0,0 +1,437 @@ +; RUN: opt < %s -passes=loop-interchange -cache-line-size=64 -pass-remarks-output=%t -disable-output \ +; RUN: -verify-dom-info -verify-loop-info -verify-loop-lcssa +; RUN: FileCheck -input-file=%t %s + +; Check that the loops aren't exchanged if there is a reduction of +; non-reassociative floating-point addition. 
+; +; float sum = 0; +; for (int i = 0; i < 2; i++) +; for (int j = 0; j < 2; j++) +; sum += A[j][i]; + +; CHECK: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: UnsupportedPHIOuter +; CHECK-NEXT: Function: reduction_fadd +define void @reduction_fadd(ptr %A) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %sum.j.next = fadd float %sum.j, %a + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that the interchange is legal if the floating-point addition is marked +; as reassoc. +; +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_reassoc_fadd +define void @reduction_reassoc_fadd(ptr %A) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %sum.j.next = fadd reassoc float %sum.j, %a + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; FIXME: Is it really legal to interchange the loops when +; both reassoc and ninf are set? +; Check that the interchange is legal if the floating-point addition is marked +; as reassoc. +; +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_reassoc_ninf_fadd +define void @reduction_reassoc_ninf_fadd(ptr %A) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %sum.j.next = fadd reassoc ninf float %sum.j, %a + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that the loops aren't exchanged if there is a reduction of +; non-reassociative floating-point multiplication. 
+; +; float prod = 1; +; for (int i = 0; i < 2; i++) +; for (int j = 0; j < 2; j++) +; prod *= A[j][i]; + +; CHECK: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: UnsupportedPHIOuter +; CHECK-NEXT: Function: reduction_fmul +define void @reduction_fmul(ptr %A) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %prod.i = phi float [ 1.0, %entry ], [ %prod.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %prod.j = phi float [ %prod.i, %for.i.header ], [ %prod.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %prod.j.next = fmul float %prod.j, %a + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %prod.i.lcssa = phi float [ %prod.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that the interchange is legal if the floating-point multiplication is +; marked as reassoc. +; +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_reassoc_fmul +define void @reduction_reassoc_fmul(ptr %A) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %prod.i = phi float [ 1.0, %entry ], [ %prod.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %prod.j = phi float [ %prod.i, %for.i.header ], [ %prod.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %prod.j.next = fmul reassoc float %prod.j, %a + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %prod.i.lcssa = phi float [ %prod.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that the loops aren't exchanged if there is a reduction of +; non-reassociative floating-point fmuladd. 
+; +; float fmuladd = 0; +; for (int i = 0; i < 2; i++) +; for (int j = 0; j < 2; j++) +; fmuladd += A[j][i] * B[j][i]; + +; CHECK: --- !Missed +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: UnsupportedPHIOuter +; CHECK-NEXT: Function: reduction_fmuladd +define void @reduction_fmuladd(ptr %A, ptr %B) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmuladd.i = phi float [ 1.0, %entry ], [ %fmuladd.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmuladd.j = phi float [ %fmuladd.i, %for.i.header ], [ %fmuladd.j.next, %for.j ] + %idx.a = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %idx.b = getelementptr inbounds [2 x [2 x i32]], ptr %B, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx.a, align 4 + %b = load float, ptr %idx.b, align 4 + %fmuladd.j.next = call float @llvm.fmuladd.f32(float %a, float %b, float %fmuladd.j) + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmuladd.i.lcssa = phi float [ %fmuladd.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that the interchange is legal if the floating-point fmuladd is marked +; as reassoc. +; +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_reassoc_fmuladd +define void @reduction_reassoc_fmuladd(ptr %A, ptr %B) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmuladd.i = phi float [ 1.0, %entry ], [ %fmuladd.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmuladd.j = phi float [ %fmuladd.i, %for.i.header ], [ %fmuladd.j.next, %for.j ] + %idx.a = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %idx.b = getelementptr inbounds [2 x [2 x i32]], ptr %B, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx.a, align 4 + %b = load float, ptr %idx.b, align 4 + %fmuladd.j.next = call reassoc float @llvm.fmuladd.f32(float %a, float %b, float %fmuladd.j) + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmuladd.i.lcssa = phi float [ %fmuladd.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that interchanging the loops is legal for the reassociative +; floating-point minimum. +; +; float fmin = init; +; for (int i = 0; i < 2; i++) +; for (int j = 0; j < 2; j++) +; fmin = (A[j][i] < fmin) ? 
A[j][i] : fmin; + +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_fmin +define void @reduction_fmin(ptr %A, float %init) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmin.i = phi float [ %init, %entry ], [ %fmin.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmin.j = phi float [ %fmin.i, %for.i.header ], [ %fmin.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %cmp = fcmp nnan nsz olt float %a, %fmin.j + %fmin.j.next = select nnan nsz i1 %cmp, float %a, float %fmin.j + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmin.i.lcssa = phi float [ %fmin.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + + +; Check that interchanging the loops is legal for the floating-point +; llvm.minimumnum. +; +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_fmininumnum +define void @reduction_fmininumnum(ptr %A, float %init) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmin.i = phi float [ %init, %entry ], [ %fmin.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmin.j = phi float [ %fmin.i, %for.i.header ], [ %fmin.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %fmin.j.next = call float @llvm.minimumnum.f32(float %a, float %fmin.j) + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmin.i.lcssa = phi float [ %fmin.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that interchanging the loops is legal for the reassociative +; floating-point maximum. +; +; float fmax = init; +; for (int i = 0; i < 2; i++) +; for (int j = 0; j < 2; j++) +; fmax = (A[j][i] > fmax) ? 
A[j][i] : fmax; + +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_fmax +define void @reduction_fmax(ptr %A, float %init) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmax.i = phi float [ %init, %entry ], [ %fmax.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmax.j = phi float [ %fmax.i, %for.i.header ], [ %fmax.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %cmp = fcmp nnan nsz ogt float %a, %fmax.j + %fmax.j.next = select nnan nsz i1 %cmp, float %a, float %fmax.j + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmax.i.lcssa = phi float [ %fmax.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +; Check that interchanging the loops is legal for the floating-point +; llvm.maximumnum. + +; CHECK: --- !Pass +; CHECK-NEXT: Pass: loop-interchange +; CHECK-NEXT: Name: Interchanged +; CHECK-NEXT: Function: reduction_fmaxinumnum +define void @reduction_fmaxinumnum(ptr %A, float %init) { +entry: + br label %for.i.header + +for.i.header: + %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] + %fmax.i = phi float [ %init, %entry ], [ %fmax.i.lcssa, %for.i.latch ] + br label %for.j + +for.j: + %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] + %fmax.j = phi float [ %fmax.i, %for.i.header ], [ %fmax.j.next, %for.j ] + %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i + %a = load float, ptr %idx, align 4 + %fmax.j.next = call float @llvm.maximumnum.f32(float %a, float %fmax.j) + %j.inc = add i32 %j, 1 + %cmp.j = icmp slt i32 %j.inc, 2 + br i1 %cmp.j, label %for.j, label %for.i.latch + +for.i.latch: + %fmax.i.lcssa = phi float [ %fmax.j.next, %for.j ] + %i.inc = add i32 %i, 1 + %cmp.i = icmp slt i32 %i.inc, 2 + br i1 %cmp.i, label %for.i.header, label %exit + +exit: + ret void +} + +declare float @llvm.fmuladd.f32(float %a, float %b, float %c) +declare float @llvm.minimumnum.f32(float %a, float %b) +declare float @llvm.maximumnum.f32(float %a, float %b)
\ No newline at end of file diff --git a/llvm/test/Transforms/LoopInterchange/reductions-non-wrapped-operations.ll b/llvm/test/Transforms/LoopInterchange/reductions-non-wrapped-operations.ll index 0eb6fe9..f5c6ad7 100644 --- a/llvm/test/Transforms/LoopInterchange/reductions-non-wrapped-operations.ll +++ b/llvm/test/Transforms/LoopInterchange/reductions-non-wrapped-operations.ll @@ -333,437 +333,3 @@ for.i.latch: exit: ret void } - -; Check that the loops aren't exchanged if there is a reduction of -; non-reassociative floating-point addition. -; -; float sum = 0; -; for (int i = 0; i < 2; i++) -; for (int j = 0; j < 2; j++) -; sum += A[j][i]; - -; CHECK: --- !Missed -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: UnsupportedPHIOuter -; CHECK-NEXT: Function: reduction_fadd -define void @reduction_fadd(ptr %A) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %sum.j.next = fadd float %sum.j, %a - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that the interchange is legal if the floating-point addition is marked -; as reassoc. -; -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_reassoc_fadd -define void @reduction_reassoc_fadd(ptr %A) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %sum.j.next = fadd reassoc float %sum.j, %a - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; FIXME: Is it really legal to interchange the loops when -; both reassoc and ninf are set? -; Check that the interchange is legal if the floating-point addition is marked -; as reassoc. 
-; -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_reassoc_ninf_fadd -define void @reduction_reassoc_ninf_fadd(ptr %A) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %sum.i = phi float [ 0.0, %entry ], [ %sum.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %sum.j = phi float [ %sum.i, %for.i.header ], [ %sum.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %sum.j.next = fadd reassoc ninf float %sum.j, %a - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %sum.i.lcssa = phi float [ %sum.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that the loops aren't exchanged if there is a reduction of -; non-reassociative floating-point multiplication. -; -; float prod = 1; -; for (int i = 0; i < 2; i++) -; for (int j = 0; j < 2; j++) -; prod *= A[j][i]; - -; CHECK: --- !Missed -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: UnsupportedPHIOuter -; CHECK-NEXT: Function: reduction_fmul -define void @reduction_fmul(ptr %A) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %prod.i = phi float [ 1.0, %entry ], [ %prod.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %prod.j = phi float [ %prod.i, %for.i.header ], [ %prod.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %prod.j.next = fmul float %prod.j, %a - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %prod.i.lcssa = phi float [ %prod.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that the interchange is legal if the floating-point multiplication is -; marked as reassoc. -; -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_reassoc_fmul -define void @reduction_reassoc_fmul(ptr %A) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %prod.i = phi float [ 1.0, %entry ], [ %prod.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %prod.j = phi float [ %prod.i, %for.i.header ], [ %prod.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %prod.j.next = fmul reassoc float %prod.j, %a - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %prod.i.lcssa = phi float [ %prod.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that the loops aren't exchanged if there is a reduction of -; non-reassociative floating-point fmuladd. 
-; -; float fmuladd = 0; -; for (int i = 0; i < 2; i++) -; for (int j = 0; j < 2; j++) -; fmuladd += A[j][i] * B[j][i]; - -; CHECK: --- !Missed -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: UnsupportedPHIOuter -; CHECK-NEXT: Function: reduction_fmuladd -define void @reduction_fmuladd(ptr %A, ptr %B) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmuladd.i = phi float [ 1.0, %entry ], [ %fmuladd.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmuladd.j = phi float [ %fmuladd.i, %for.i.header ], [ %fmuladd.j.next, %for.j ] - %idx.a = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %idx.b = getelementptr inbounds [2 x [2 x i32]], ptr %B, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx.a, align 4 - %b = load float, ptr %idx.b, align 4 - %fmuladd.j.next = call float @llvm.fmuladd.f32(float %a, float %b, float %fmuladd.j) - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmuladd.i.lcssa = phi float [ %fmuladd.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that the interchange is legal if the floating-point fmuladd is marked -; as reassoc. -; -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_reassoc_fmuladd -define void @reduction_reassoc_fmuladd(ptr %A, ptr %B) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmuladd.i = phi float [ 1.0, %entry ], [ %fmuladd.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmuladd.j = phi float [ %fmuladd.i, %for.i.header ], [ %fmuladd.j.next, %for.j ] - %idx.a = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %idx.b = getelementptr inbounds [2 x [2 x i32]], ptr %B, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx.a, align 4 - %b = load float, ptr %idx.b, align 4 - %fmuladd.j.next = call reassoc float @llvm.fmuladd.f32(float %a, float %b, float %fmuladd.j) - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmuladd.i.lcssa = phi float [ %fmuladd.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that interchanging the loops is legal for the reassociative -; floating-point minimum. -; -; float fmin = init; -; for (int i = 0; i < 2; i++) -; for (int j = 0; j < 2; j++) -; fmin = (A[j][i] < fmin) ? 
A[j][i] : fmin; - -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_fmin -define void @reduction_fmin(ptr %A, float %init) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmin.i = phi float [ %init, %entry ], [ %fmin.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmin.j = phi float [ %fmin.i, %for.i.header ], [ %fmin.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %cmp = fcmp nnan nsz olt float %a, %fmin.j - %fmin.j.next = select nnan nsz i1 %cmp, float %a, float %fmin.j - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmin.i.lcssa = phi float [ %fmin.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - - -; Check that interchanging the loops is legal for the floating-point -; llvm.minimumnum. -; -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_fmininumnum -define void @reduction_fmininumnum(ptr %A, float %init) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmin.i = phi float [ %init, %entry ], [ %fmin.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmin.j = phi float [ %fmin.i, %for.i.header ], [ %fmin.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %fmin.j.next = call float @llvm.minimumnum.f32(float %a, float %fmin.j) - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmin.i.lcssa = phi float [ %fmin.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that interchanging the loops is legal for the reassociative -; floating-point maximum. -; -; float fmax = init; -; for (int i = 0; i < 2; i++) -; for (int j = 0; j < 2; j++) -; fmax = (A[j][i] > fmax) ? 
A[j][i] : fmax; - -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_fmax -define void @reduction_fmax(ptr %A, float %init) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmax.i = phi float [ %init, %entry ], [ %fmax.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmax.j = phi float [ %fmax.i, %for.i.header ], [ %fmax.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %cmp = fcmp nnan nsz ogt float %a, %fmax.j - %fmax.j.next = select nnan nsz i1 %cmp, float %a, float %fmax.j - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmax.i.lcssa = phi float [ %fmax.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -; Check that interchanging the loops is legal for the floating-point -; llvm.maximumnum. - -; CHECK: --- !Pass -; CHECK-NEXT: Pass: loop-interchange -; CHECK-NEXT: Name: Interchanged -; CHECK-NEXT: Function: reduction_fmaxinumnum -define void @reduction_fmaxinumnum(ptr %A, float %init) { -entry: - br label %for.i.header - -for.i.header: - %i = phi i32 [ 0, %entry ], [ %i.inc, %for.i.latch ] - %fmax.i = phi float [ %init, %entry ], [ %fmax.i.lcssa, %for.i.latch ] - br label %for.j - -for.j: - %j = phi i32 [ 0, %for.i.header ], [ %j.inc, %for.j ] - %fmax.j = phi float [ %fmax.i, %for.i.header ], [ %fmax.j.next, %for.j ] - %idx = getelementptr inbounds [2 x [2 x i32]], ptr %A, i32 0, i32 %j, i32 %i - %a = load float, ptr %idx, align 4 - %fmax.j.next = call float @llvm.maximumnum.f32(float %a, float %fmax.j) - %j.inc = add i32 %j, 1 - %cmp.j = icmp slt i32 %j.inc, 2 - br i1 %cmp.j, label %for.j, label %for.i.latch - -for.i.latch: - %fmax.i.lcssa = phi float [ %fmax.j.next, %for.j ] - %i.inc = add i32 %i, 1 - %cmp.i = icmp slt i32 %i.inc, 2 - br i1 %cmp.i, label %for.i.header, label %exit - -exit: - ret void -} - -declare float @llvm.fmuladd.f32(float %a, float %b, float %c) -declare float @llvm.minimumnum.f32(float %a, float %b) -declare float @llvm.maximumnum.f32(float %a, float %b) diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll b/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll index 1a091e8..0b78bee 100644 --- a/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll +++ b/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll @@ -578,8 +578,323 @@ loop.latch: exit: ret void } + +define i32 @test_add_reduction_unroll_partial(ptr %a, i64 noundef %n) { +; APPLE-LABEL: define i32 @test_add_reduction_unroll_partial( +; APPLE-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; APPLE-NEXT: [[ENTRY:.*]]: +; APPLE-NEXT: br label %[[LOOP:.*]] +; APPLE: [[LOOP]]: +; APPLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; APPLE-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; APPLE-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; APPLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; APPLE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; APPLE-NEXT: br i1 [[EC]], label 
%[[EXIT:.*]], label %[[LOOP]] +; APPLE: [[EXIT]]: +; APPLE-NEXT: [[BIN_RDX2:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ] +; APPLE-NEXT: ret i32 [[BIN_RDX2]] +; +; OTHER-LABEL: define i32 @test_add_reduction_unroll_partial( +; OTHER-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; OTHER-NEXT: [[ENTRY:.*]]: +; OTHER-NEXT: br label %[[LOOP:.*]] +; OTHER: [[LOOP]]: +; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; OTHER-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]] +; OTHER-NEXT: [[TMP1:%.*]] = load i32, ptr [[GEP_A_1]], align 2 +; OTHER-NEXT: [[RDX_2:%.*]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP1]] +; OTHER-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; OTHER-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_1]] +; OTHER-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A_2]], align 2 +; OTHER-NEXT: [[RDX_NEXT_2:%.*]] = add nuw nsw i32 [[RDX_2]], [[TMP2]] +; OTHER-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; OTHER-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_2]] +; OTHER-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_3]], align 2 +; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_NEXT_2]], [[TMP3]] +; OTHER-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; OTHER-NEXT: [[EC_3:%.*]] = icmp eq i64 [[IV_NEXT_3]], 1024 +; OTHER-NEXT: br i1 [[EC_3]], label %[[EXIT:.*]], label %[[LOOP]] +; OTHER: [[EXIT]]: +; OTHER-NEXT: [[BIN_RDX2:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: ret i32 [[BIN_RDX2]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %rdx.next = add nuw nsw i32 %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1024 + br i1 %ec, label %exit, label %loop + +exit: + %res = phi i32 [ %rdx.next, %loop ] + ret i32 %res +} + +declare i1 @cond() + +define i32 @test_add_reduction_multi_block(ptr %a, i64 noundef %n) { +; APPLE-LABEL: define i32 @test_add_reduction_multi_block( +; APPLE-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; APPLE-NEXT: [[ENTRY:.*]]: +; APPLE-NEXT: br label %[[LOOP:.*]] +; APPLE: [[LOOP]]: +; APPLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; APPLE-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP_LATCH]] ] +; APPLE-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; APPLE-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; APPLE-NEXT: [[C:%.*]] = call i1 @cond() +; APPLE-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; APPLE: [[THEN]]: +; APPLE-NEXT: store i32 0, ptr [[GEP_A]], align 4 +; APPLE-NEXT: br label %[[LOOP_LATCH]] +; APPLE: [[LOOP_LATCH]]: +; APPLE-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; APPLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; APPLE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; APPLE-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; APPLE: 
[[EXIT]]: +; APPLE-NEXT: [[RES:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP_LATCH]] ] +; APPLE-NEXT: ret i32 [[RES]] +; +; OTHER-LABEL: define i32 @test_add_reduction_multi_block( +; OTHER-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; OTHER-NEXT: [[ENTRY:.*]]: +; OTHER-NEXT: br label %[[LOOP:.*]] +; OTHER: [[LOOP]]: +; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP_LATCH]] ] +; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; OTHER-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; OTHER-NEXT: [[C:%.*]] = call i1 @cond() +; OTHER-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; OTHER: [[THEN]]: +; OTHER-NEXT: store i32 0, ptr [[GEP_A]], align 4 +; OTHER-NEXT: br label %[[LOOP_LATCH]] +; OTHER: [[LOOP_LATCH]]: +; OTHER-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; OTHER-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; OTHER-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; OTHER-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; OTHER: [[EXIT]]: +; OTHER-NEXT: [[RES:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP_LATCH]] ] +; OTHER-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop.latch ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %c = call i1 @cond() + br i1 %c, label %then, label %loop.latch + +then: + store i32 0, ptr %gep.a + br label %loop.latch + +loop.latch: + %rdx.next = add nuw nsw i32 %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1024 + br i1 %ec, label %exit, label %loop + +exit: + %res = phi i32 [ %rdx.next, %loop.latch ] + ret i32 %res +} + +define i32 @test_add_and_mul_reduction_unroll_partial(ptr %a, i64 noundef %n) { +; APPLE-LABEL: define i32 @test_add_and_mul_reduction_unroll_partial( +; APPLE-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; APPLE-NEXT: [[ENTRY:.*]]: +; APPLE-NEXT: br label %[[LOOP:.*]] +; APPLE: [[LOOP]]: +; APPLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[RDX_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_2_NEXT:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; APPLE-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; APPLE-NEXT: [[RDX_NEXT]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; APPLE-NEXT: [[RDX_2_NEXT]] = mul i32 [[RDX_2]], [[TMP0]] +; APPLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; APPLE-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 +; APPLE-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; APPLE: [[EXIT]]: +; APPLE-NEXT: [[BIN_RDX3:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ] +; APPLE-NEXT: [[RES_2:%.*]] = phi i32 [ [[RDX_2_NEXT]], %[[LOOP]] ] +; APPLE-NEXT: [[SUM:%.*]] = add i32 [[BIN_RDX3]], [[RES_2]] +; APPLE-NEXT: ret i32 [[SUM]] +; +; OTHER-LABEL: define i32 @test_add_and_mul_reduction_unroll_partial( +; OTHER-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; OTHER-NEXT: [[ENTRY:.*]]: +; OTHER-NEXT: br label %[[LOOP:.*]] +; OTHER: [[LOOP]]: +; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_1:%.*]], 
%[[LOOP]] ] +; OTHER-NEXT: [[RDX_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_2_NEXT_1:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; OTHER-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP_A]], align 2 +; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP0]] +; OTHER-NEXT: [[RDX_2_NEXT:%.*]] = mul i32 [[RDX_2]], [[TMP0]] +; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]] +; OTHER-NEXT: [[TMP1:%.*]] = load i32, ptr [[GEP_A_1]], align 2 +; OTHER-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP1]] +; OTHER-NEXT: [[RDX_2_NEXT_1]] = mul i32 [[RDX_2_NEXT]], [[TMP1]] +; OTHER-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; OTHER-NEXT: [[EC_1:%.*]] = icmp eq i64 [[IV_NEXT_1]], 1024 +; OTHER-NEXT: br i1 [[EC_1]], label %[[EXIT:.*]], label %[[LOOP]] +; OTHER: [[EXIT]]: +; OTHER-NEXT: [[BIN_RDX:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; OTHER-NEXT: [[RES_2:%.*]] = phi i32 [ [[RDX_2_NEXT_1]], %[[LOOP]] ] +; OTHER-NEXT: [[SUM:%.*]] = add i32 [[BIN_RDX]], [[RES_2]] +; OTHER-NEXT: ret i32 [[SUM]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %rdx.2 = phi i32 [ 0, %entry ], [ %rdx.2.next, %loop ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %rdx.next = add nuw nsw i32 %rdx, %1 + %rdx.2.next = mul i32 %rdx.2, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1024 + br i1 %ec, label %exit, label %loop + +exit: + %res.1 = phi i32 [ %rdx.next, %loop ] + %res.2 = phi i32 [ %rdx.2.next, %loop ] + %sum = add i32 %res.1, %res.2 + ret i32 %sum +} + + +define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { +; APPLE-LABEL: define i32 @test_add_reduction_runtime( +; APPLE-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; APPLE-NEXT: [[ENTRY:.*]]: +; APPLE-NEXT: br label %[[LOOP:.*]] +; APPLE: [[LOOP]]: +; APPLE-NEXT: [[IV_EPIL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP]] ] +; APPLE-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL]] +; APPLE-NEXT: [[TMP6:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 +; APPLE-NEXT: [[RDX_NEXT_EPIL]] = add nuw nsw i32 [[RDX_EPIL]], [[TMP6]] +; APPLE-NEXT: [[IV_NEXT_EPIL]] = add nuw nsw i64 [[IV_EPIL]], 1 +; APPLE-NEXT: [[EC_EPIL:%.*]] = icmp eq i64 [[IV_NEXT_EPIL]], [[N]] +; APPLE-NEXT: br i1 [[EC_EPIL]], label %[[EXIT:.*]], label %[[LOOP]] +; APPLE: [[EXIT]]: +; APPLE-NEXT: [[RES:%.*]] = phi i32 [ [[RDX_NEXT_EPIL]], %[[LOOP]] ] +; APPLE-NEXT: ret i32 [[RES]] +; +; OTHER-LABEL: define i32 @test_add_reduction_runtime( +; OTHER-SAME: ptr [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +; OTHER-NEXT: [[ENTRY:.*]]: +; OTHER-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; OTHER-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 3 +; OTHER-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 3 +; OTHER-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; OTHER: [[ENTRY_NEW]]: +; OTHER-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; OTHER-NEXT: br label %[[LOOP:.*]] +; OTHER: [[LOOP]]: +; OTHER-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] 
] +; OTHER-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_3:%.*]], %[[LOOP]] ] +; OTHER-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; OTHER-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A]], align 2 +; OTHER-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP2]] +; OTHER-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; OTHER-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]] +; OTHER-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_1]], align 2 +; OTHER-NEXT: [[RDX_2:%.*]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP3]] +; OTHER-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; OTHER-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_1]] +; OTHER-NEXT: [[TMP4:%.*]] = load i32, ptr [[GEP_A_2]], align 2 +; OTHER-NEXT: [[RDX_NEXT_2:%.*]] = add nuw nsw i32 [[RDX_2]], [[TMP4]] +; OTHER-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; OTHER-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_2]] +; OTHER-NEXT: [[TMP5:%.*]] = load i32, ptr [[GEP_A_3]], align 2 +; OTHER-NEXT: [[RDX_NEXT_3]] = add nuw nsw i32 [[RDX_NEXT_2]], [[TMP5]] +; OTHER-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; OTHER-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 +; OTHER-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] +; OTHER-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] +; OTHER: [[EXIT_UNR_LCSSA_LOOPEXIT]]: +; OTHER-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: br label %[[EXIT_UNR_LCSSA]] +; OTHER: [[EXIT_UNR_LCSSA]]: +; OTHER-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; OTHER: [[LOOP_EPIL_PREHEADER]]: +; OTHER-NEXT: br label %[[LOOP_EPIL:.*]] +; OTHER: [[LOOP_EPIL]]: +; OTHER-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL]] +; OTHER-NEXT: [[TMP6:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 +; OTHER-NEXT: [[RDX_NEXT_EPIL]] = add nuw nsw i32 [[RDX_EPIL]], [[TMP6]] +; OTHER-NEXT: [[IV_NEXT_EPIL]] = add nuw nsw i64 [[IV_EPIL]], 1 +; OTHER-NEXT: [[EC_EPIL:%.*]] = icmp eq i64 [[IV_NEXT_EPIL]], [[N]] +; OTHER-NEXT: [[EPIL_ITER_NEXT]] = add i64 [[EPIL_ITER]], 1 +; OTHER-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i64 [[EPIL_ITER_NEXT]], [[XTRAITER]] +; OTHER-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[LOOP_EPIL]], label %[[EXIT_EPILOG_LCSSA:.*]], !llvm.loop [[LOOP0:![0-9]+]] +; OTHER: [[EXIT_EPILOG_LCSSA]]: +; OTHER-NEXT: [[RES_PH1:%.*]] = phi i32 [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: br label %[[EXIT]] +; 
OTHER: [[EXIT]]: +; OTHER-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RES_PH1]], %[[EXIT_EPILOG_LCSSA]] ] +; OTHER-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %rdx.next = add nuw nsw i32 %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %res = phi i32 [ %rdx.next, %loop ] + ret i32 %res +} ;. ; APPLE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]} ; APPLE: [[META1]] = !{!"llvm.loop.unroll.disable"} ; APPLE: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]} ;. +; OTHER: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]} +; OTHER: [[META1]] = !{!"llvm.loop.unroll.disable"} +;. diff --git a/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll b/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll new file mode 100644 index 0000000..953dc278 --- /dev/null +++ b/llvm/test/Transforms/LoopUnroll/partial-unroll-reductions.ll @@ -0,0 +1,446 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -p loop-unroll -unroll-allow-partial -unroll-max-count=4 -S %s | FileCheck %s + +define i32 @test_add(ptr %src, i64 %n, i32 %start) { +; CHECK-LABEL: define i32 @test_add( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add i32 [[RDX]], [[L]] +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = add i32 [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = add i32 [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i32, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = add i32 [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.next = add i32 %rdx, %l + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret i32 
%rdx.next +} + +define i32 @test_add_tc_not_multiple_of_4(ptr %src, i64 %n, i32 %start) { +; CHECK-LABEL: define i32 @test_add_tc_not_multiple_of_4( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP_1:.*]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP_1]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add i32 [[RDX]], [[L]] +; CHECK-NEXT: [[EC:%.*]] = icmp ne i64 [[IV_NEXT]], 1001 +; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_1]], label %[[EXIT:.*]] +; CHECK: [[LOOP_1]]: +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = add i32 [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = add i32 [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_12:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_12:%.*]] = load i32, ptr [[GEP_SRC_12]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = add i32 [[RDX_NEXT_2]], [[L_12]] +; CHECK-NEXT: br label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.next = add i32 %rdx, %l + %ec = icmp ne i64 %iv.next, 1001 + br i1 %ec, label %loop, label %exit + +exit: + ret i32 %rdx.next +} + +define i32 @test_add_rdx_used_in_loop(ptr %src, i64 %n, i32 %start) { +; CHECK-LABEL: define i32 @test_add_rdx_used_in_loop( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_24:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add i32 [[RDX]], [[L]] +; CHECK-NEXT: store i32 [[RDX_NEXT]], ptr [[GEP_SRC]], align 4 +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = add i32 [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: store i32 [[RDX_NEXT_1]], ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr 
[[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = add i32 [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: store i32 [[RDX_NEXT_2]], ptr [[GEP_SRC_2]], align 4 +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i32, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_24]] = add i32 [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: store i32 [[RDX_NEXT_24]], ptr [[GEP_SRC_24]], align 4 +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT_24]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.next = add i32 %rdx, %l + store i32 %rdx.next, ptr %gep.src + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret i32 %rdx.next +} + +define i32 @test_add_phi_used_outside_loop(ptr %src, i64 %n, i32 %start) { +; CHECK-LABEL: define i32 @test_add_phi_used_outside_loop( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add i32 [[RDX]], [[L]] +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = add i32 [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = add i32 [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i32, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = add i32 [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT_2]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RDX_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.next = add i32 %rdx, %l + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret i32 %rdx +} + +define i32 @test_add_and_mul_reduction(ptr %src, 
i64 %n, i32 %start) { +; CHECK-LABEL: define i32 @test_add_and_mul_reduction( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i32 [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_1:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_1_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_2:%.*]] = phi i32 [ [[START]], %[[ENTRY]] ], [ [[RDX_2_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_1_NEXT:%.*]] = add i32 [[RDX_1]], [[L]] +; CHECK-NEXT: [[RDX_2_NEXT:%.*]] = mul i32 [[RDX_2]], [[L]] +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_1_2:%.*]] = add i32 [[RDX_1_NEXT]], [[L_1]] +; CHECK-NEXT: [[RDX_2_2:%.*]] = mul i32 [[RDX_2_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_1_NEXT_2:%.*]] = add i32 [[RDX_1_2]], [[L_2]] +; CHECK-NEXT: [[RDX_2_NEXT_2:%.*]] = mul i32 [[RDX_2_2]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i32, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_1_NEXT_3]] = add i32 [[RDX_1_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[RDX_2_NEXT_3]] = mul i32 [[RDX_2_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_1_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_1_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: [[BIN_RDX5:%.*]] = phi i32 [ [[RDX_2_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: [[RES:%.*]] = add i32 [[RDX_1_NEXT_LCSSA]], [[BIN_RDX5]] +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx.1 = phi i32 [ %start, %entry ], [ %rdx.1.next, %loop ] + %rdx.2 = phi i32 [ %start, %entry ], [ %rdx.2.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.1.next = add i32 %rdx.1, %l + %rdx.2.next = mul i32 %rdx.2, %l + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + %res = add i32 %rdx.1.next, %rdx.2.next + ret i32 %res +} + +define float @test_fadd_no_fmfs(ptr %src, i64 %n, float %start) { +; CHECK-LABEL: define float @test_fadd_no_fmfs( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], float [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = fadd float [[RDX]], [[L]] +; 
CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = fadd float [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = fadd float [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load float, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = fadd float [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi float [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: ret float [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi float [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr float, ptr %src, i64 %iv + %l = load float, ptr %gep.src, align 1 + %rdx.next = fadd float %rdx, %l + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret float %rdx.next +} + +define float @test_fadd_with_reassoc(ptr %src, i64 %n, float %start) { +; CHECK-LABEL: define float @test_fadd_with_reassoc( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], float [[START:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[START]], %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = fadd float [[RDX]], [[L]] +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = fadd float [[RDX_NEXT]], [[L_1]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load float, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = fadd float [[RDX_NEXT_1]], [[L_2]] +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr float, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load float, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = fadd float [[RDX_NEXT_2]], [[L_24]] +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi float [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: ret float [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi float [ %start, %entry ], [ %rdx.next, %loop ] + %iv.next = 
add i64 %iv, 1 + %gep.src = getelementptr float, ptr %src, i64 %iv + %l = load float, ptr %gep.src, align 1 + %rdx.next = fadd float %rdx, %l + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret float %rdx.next +} +define i32 @test_smin(ptr %src, i64 %n) { +; CHECK-LABEL: define i32 @test_smin( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi i32 [ 1000, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = call i32 @llvm.smin.i32(i32 [[MIN]], i32 [[L]]) +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i32, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[RDX_NEXT_1:%.*]] = call i32 @llvm.smin.i32(i32 [[RDX_NEXT]], i32 [[L_1]]) +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i32, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = call i32 @llvm.smin.i32(i32 [[RDX_NEXT_1]], i32 [[L_2]]) +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i32, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[RDX_NEXT_3]] = call i32 @llvm.smin.i32(i32 [[RDX_NEXT_2]], i32 [[L_24]]) +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: ret i32 [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi i32 [ 1000, %entry ], [ %rdx.next, %loop ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i32, ptr %src, i64 %iv + %l = load i32 , ptr %gep.src, align 1 + %rdx.next = call i32 @llvm.smin(i32 %min, i32 %l) + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret i32 %rdx.next +} + +define i64 @test_any_of_reduction(ptr %src, i64 %n) { +; CHECK-LABEL: define i64 @test_any_of_reduction( +; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[ANY_OF_RDX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[RDX_NEXT_3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = select i1 [[C]], i64 [[ANY_OF_RDX]], i64 0 +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[L_1:%.*]] = load i8, ptr [[GEP_SRC_1]], align 1 +; CHECK-NEXT: [[C_1:%.*]] = icmp eq i8 [[L_1]], 0 +; CHECK-NEXT: 
[[RDX_NEXT_1:%.*]] = select i1 [[C_1]], i64 [[RDX_NEXT]], i64 0 +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[L_2:%.*]] = load i8, ptr [[GEP_SRC_2]], align 1 +; CHECK-NEXT: [[C_2:%.*]] = icmp eq i8 [[L_2]], 0 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = select i1 [[C_2]], i64 [[RDX_NEXT_1]], i64 0 +; CHECK-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_SRC_24:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[L_24:%.*]] = load i8, ptr [[GEP_SRC_24]], align 1 +; CHECK-NEXT: [[C_24:%.*]] = icmp eq i8 [[L_24]], 0 +; CHECK-NEXT: [[RDX_NEXT_3]] = select i1 [[C_24]], i64 [[RDX_NEXT_2]], i64 0 +; CHECK-NEXT: [[EC_3:%.*]] = icmp ne i64 [[IV_NEXT_3]], 1000 +; CHECK-NEXT: br i1 [[EC_3]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RDX_NEXT_LCSSA:%.*]] = phi i64 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; CHECK-NEXT: ret i64 [[RDX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ] + %any.of.rdx = phi i64 [ %rdx.next, %loop ], [ 0, %entry ] + %iv.next = add i64 %iv, 1 + %gep.src = getelementptr i8, ptr %src, i64 %iv + %l = load i8, ptr %gep.src, align 1 + %c = icmp eq i8 %l, 0 + %rdx.next = select i1 %c, i64 %any.of.rdx, i64 0 + %ec = icmp ne i64 %iv.next, 1000 + br i1 %ec, label %loop, label %exit + +exit: + ret i64 %rdx.next +} diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll new file mode 100644 index 0000000..89f06ad --- /dev/null +++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll @@ -0,0 +1,238 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -p loop-unroll -S %s | FileCheck %s + +define i32 @test_add_reduction(ptr %a, i64 %n) { +; CHECK-LABEL: define i32 @test_add_reduction( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 +; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A]], align 2 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP2]] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_1]], align 2 +; CHECK-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP3]] +; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 +; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] +; 
CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_UNR]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 +; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = add nuw nsw i32 [[RDX_UNR]], [[TMP4]] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %rdx.next = add nuw nsw i32 %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + %res = phi i32 [ %rdx.next, %loop ] + ret i32 %res +} + +define i32 @test_add_reduction_constant_op(ptr %a, i64 %n) { +; CHECK-LABEL: define i32 @test_add_reduction_constant_op( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 +; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_1:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_NEXT_1]] = add nuw nsw i32 [[RDX]], 2 +; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 +; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: 
[[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = add nuw nsw i32 [[RDX_UNR]], 1 +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %rdx.next = add nuw nsw i32 %rdx, 1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !0 + +exit: + %res = phi i32 [ %rdx.next, %loop ] + ret i32 %res +} + +define i32 @test_add_reduction_8x_unroll(ptr %a, i64 %n) { +; CHECK-LABEL: define i32 @test_add_reduction_8x_unroll( +; CHECK-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 7 +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7 +; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK: [[ENTRY_NEW]]: +; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_7:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 0, %[[ENTRY_NEW]] ], [ [[RDX_NEXT_7:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[GEP_A]], align 2 +; CHECK-NEXT: [[RDX_NEXT:%.*]] = add nuw nsw i32 [[RDX]], [[TMP2]] +; CHECK-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[GEP_A_1]], align 2 +; CHECK-NEXT: [[RDX_2:%.*]] = add nuw nsw i32 [[RDX_NEXT]], [[TMP3]] +; CHECK-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2 +; CHECK-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_1]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[GEP_A_2]], align 2 +; CHECK-NEXT: [[RDX_NEXT_2:%.*]] = add nuw nsw i32 [[RDX_2]], [[TMP4]] +; CHECK-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3 +; CHECK-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_2]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[GEP_A_3]], align 2 +; CHECK-NEXT: [[RDX_4:%.*]] = add nuw nsw i32 [[RDX_NEXT_2]], [[TMP5]] +; CHECK-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4 +; CHECK-NEXT: [[GEP_A_4:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_3]] +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[GEP_A_4]], align 2 +; CHECK-NEXT: [[RDX_NEXT_4:%.*]] = add nuw nsw i32 [[RDX_4]], [[TMP6]] +; CHECK-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5 +; CHECK-NEXT: [[GEP_A_5:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_4]] +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[GEP_A_5]], align 2 +; CHECK-NEXT: [[RDX_6:%.*]] = add nuw nsw i32 [[RDX_NEXT_4]], [[TMP7]] +; CHECK-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6 +; CHECK-NEXT: [[GEP_A_6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 
[[IV_NEXT_5]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[GEP_A_6]], align 2 +; CHECK-NEXT: [[RDX_NEXT_6:%.*]] = add nuw nsw i32 [[RDX_6]], [[TMP8]] +; CHECK-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7 +; CHECK-NEXT: [[GEP_A_7:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_NEXT_6]] +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[GEP_A_7]], align 2 +; CHECK-NEXT: [[RDX_NEXT_7]] = add nuw nsw i32 [[RDX_NEXT_6]], [[TMP9]] +; CHECK-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8 +; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 +; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] +; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK: [[EXIT_UNR_LCSSA]]: +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] +; CHECK: [[LOOP_EPIL]]: +; CHECK-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL]] +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 +; CHECK-NEXT: [[RDX_NEXT_EPIL]] = add nuw nsw i32 [[RDX_EPIL]], [[TMP10]] +; CHECK-NEXT: [[IV_NEXT_EPIL]] = add nuw nsw i64 [[IV_EPIL]], 1 +; CHECK-NEXT: [[EC_EPIL:%.*]] = icmp eq i64 [[IV_NEXT_EPIL]], [[N]] +; CHECK-NEXT: [[EPIL_ITER_NEXT]] = add i64 [[EPIL_ITER]], 1 +; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i64 [[EPIL_ITER_NEXT]], [[XTRAITER]] +; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[LOOP_EPIL]], label %[[EXIT_EPILOG_LCSSA:.*]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[EXIT_EPILOG_LCSSA]]: +; CHECK-NEXT: [[RES_PH1:%.*]] = phi i32 [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: br label %[[EXIT]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RES_PH1]], %[[EXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %rdx = phi i32 [ 0, %entry ], [ %rdx.next, %loop ] + %gep.a = getelementptr inbounds nuw i32, ptr %a, i64 %iv + %1 = load i32, ptr %gep.a, align 2 + %rdx.next = add nuw nsw i32 %rdx, %1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop, !llvm.loop !2 + +exit: + %res = phi i32 [ %rdx.next, %loop ] + ret i32 %res +} + + + +!0 = distinct !{!0, !1} +!1 = !{!"llvm.loop.unroll.count", i32 2} + +!2 = distinct 
!{!2, !3} +!3 = !{!"llvm.loop.unroll.count", i32 8} + +;. +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]} +; CHECK: [[META1]] = !{!"llvm.loop.unroll.disable"} +; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]} +;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll new file mode 100644 index 0000000..2d15431 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll @@ -0,0 +1,38 @@ +; REQUIRES: asserts +; RUN: opt -passes=loop-vectorize -vectorizer-maximize-bandwidth -debug-only=loop-vectorize -disable-output -force-vector-interleave=1 -enable-epilogue-vectorization=false -S < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-REGS-VP +; RUN: opt -passes=loop-vectorize -vectorizer-maximize-bandwidth -debug-only=loop-vectorize -disable-output -force-target-num-vector-regs=1 -force-vector-interleave=1 -enable-epilogue-vectorization=false -S < %s 2>&1 | FileCheck %s --check-prefixes=CHECK-NOREGS-VP + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-none-unknown-elf" + +define i32 @dotp(ptr %a, ptr %b) #0 { +; CHECK-REGS-VP-NOT: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers +; CHECK-REGS-VP: LV: Selecting VF: vscale x 8. +; +; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 8 because it uses too many registers +; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers +; CHECK-NOREGS-VP: LV: Selecting VF: vscale x 4. +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %accum = phi i32 [ 0, %entry ], [ %add, %for.body ] + %gep.a = getelementptr i8, ptr %a, i64 %iv + %load.a = load i8, ptr %gep.a, align 1 + %ext.a = zext i8 %load.a to i32 + %gep.b = getelementptr i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 + %ext.b = zext i8 %load.b to i32 + %mul = mul i32 %ext.b, %ext.a + %sub = sub i32 0, %mul + %add = add i32 %accum, %sub + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, 1024 + br i1 %exitcond.not, label %for.exit, label %for.body + +for.exit: ; preds = %for.body + ret i32 %add +} + +attributes #0 = { vscale_range(1,16) "target-features"="+sve" } diff --git a/llvm/test/Transforms/SLPVectorizer/X86/split-node-reorder-node-with-ops.ll b/llvm/test/Transforms/SLPVectorizer/X86/split-node-reorder-node-with-ops.ll index 8e09847..cfff117 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/split-node-reorder-node-with-ops.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/split-node-reorder-node-with-ops.ll @@ -58,7 +58,6 @@ define void @test(i32 %0, i8 %1, i64 %2, float %3) { ; CHECK-NEXT: br label %[[BB54:.*]] ; CHECK: [[BB54]]: ; CHECK-NEXT: [[TMP54:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[TMP17]]) -; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 0, ptr null) ; CHECK-NEXT: [[TMP55:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[TMP21]]) ; CHECK-NEXT: [[TMP56:%.*]] = insertelement <8 x float> poison, float [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP57:%.*]] = shufflevector <8 x float> [[TMP56]], <8 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 0, i32 0, i32 0, i32 0, i32 poison, i32 poison> @@ -198,7 +197,6 @@ define void @test(i32 
%0, i8 %1, i64 %2, float %3) { %95 = or i64 %94, %91 %96 = or i64 %95, %37 store i64 %96, ptr null, align 1 - call void @llvm.lifetime.start.p0(i64 0, ptr null) store i64 %42, ptr null, align 1 %97 = bitcast float %3 to i32 %98 = icmp ult i32 %97, 1325400064 diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dup-bbs.ll b/llvm/test/Transforms/SimplifyCFG/switch-dup-bbs.ll index 32581bb..d2d917d 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-dup-bbs.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-dup-bbs.ll @@ -199,3 +199,44 @@ exit: %ret = phi i64 [ 0, %default ], [ 0, %bb1 ], [ 1, %entry ], [ 1, %bb2 ] ret i64 %ret } + +define i32 @switch_dup_unbounded_predecessors(i32 %val) { +; SIMPLIFY-CFG-LABEL: define i32 @switch_dup_unbounded_predecessors( +; SIMPLIFY-CFG-SAME: i32 [[VAL:%.*]]) { +; SIMPLIFY-CFG-NEXT: [[ENTRY:.*]]: +; SIMPLIFY-CFG-NEXT: switch i32 [[VAL]], label %[[EXIT:.*]] [ +; SIMPLIFY-CFG-NEXT: i32 99, label %[[BB1:.*]] +; SIMPLIFY-CFG-NEXT: i32 115, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: i32 102, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: i32 70, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: i32 101, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: i32 69, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: i32 103, label %[[BB1]] +; SIMPLIFY-CFG-NEXT: ] +; SIMPLIFY-CFG: [[BB1]]: +; SIMPLIFY-CFG-NEXT: br label %[[EXIT]] +; SIMPLIFY-CFG: [[EXIT]]: +; SIMPLIFY-CFG-NEXT: [[PHI:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ 1, %[[BB1]] ] +; SIMPLIFY-CFG-NEXT: ret i32 [[PHI]] +; +entry: + switch i32 %val, label %exit [ + i32 99, label %bb1 + i32 115, label %bb1 + i32 102, label %bb2 + i32 70, label %bb2 + i32 101, label %bb2 + i32 69, label %bb2 + i32 103, label %bb2 + ] + +bb1: + br label %exit + +bb2: + br label %exit + +exit: + %phi = phi i32 [ 0, %entry ], [ 1, %bb1 ], [ 1, %bb2 ] + ret i32 %phi +} diff --git a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll index 4136f33..8f2ae2d 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll @@ -149,7 +149,7 @@ unreach2: define void @pr53208_single_reachable_dest(i8 %sw, ptr %p0) { ; CHECK-LABEL: @pr53208_single_reachable_dest( -; CHECK-NEXT: group2: +; CHECK-NEXT: exit: ; CHECK-NEXT: call void @bar(ptr [[P0:%.*]]) ; CHECK-NEXT: ret void ; |