Diffstat (limited to 'llvm/test/Transforms')
107 files changed, 4037 insertions, 1178 deletions
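The change applied across these tests follows a single pattern: each GVN test gains a second RUN line exercising the MemorySSA-backed implementation of GVN, and the shared CHECK prefix is split so that output common to both implementations stays under CHECK while divergent output moves under MDEP (the existing MemoryDependenceAnalysis-based path) and MSSA (the MemorySSA-based path). A minimal sketch of the RUN-line pattern, lifted from the hunks below:

  ; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
  ; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA

The assertion bodies are regenerated with utils/update_test_checks.py, which also emits the ";; NOTE: These prefixes are unused and the list is autogenerated" trailer lines visible below whenever a named prefix ends up matching no output. Where the two sets of checks diverge in these hunks, the MSSA run performs less aggressive load-PRE than the MDEP run (for example, in pre-load.ll's @test1 the load stays in block4 rather than being hoisted into block2).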
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll index a38d3e5..1e789b0 100644 --- a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll +++ b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -passes=gvn -S | FileCheck %s +; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" @a = common global [100 x i64] zeroinitializer, align 16 @@ -50,32 +51,56 @@ if.end: ; preds = %if.then, %entry } define void @test2(i64 %i) { -; CHECK-LABEL: @test2( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]] -; CHECK-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]] -; CHECK-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]] -; CHECK-NEXT: store i64 [[MUL]], ptr @g1, align 8 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3 -; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo() -; CHECK-NEXT: store i64 [[CALL]], ptr @g2, align 8 -; CHECK-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8 -; CHECK-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8 -; CHECK-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]] -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ] -; CHECK-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ] -; CHECK-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ] -; CHECK-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ] -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]] -; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]] -; CHECK-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8 -; CHECK-NEXT: ret void +; MDEP-LABEL: @test2( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]] +; MDEP-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]] +; MDEP-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8 +; MDEP-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]] +; MDEP-NEXT: store i64 [[MUL]], ptr @g1, align 8 +; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3 +; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] +; MDEP: if.then: +; MDEP-NEXT: [[CALL:%.*]] = tail call i64 (...) 
@goo() +; MDEP-NEXT: store i64 [[CALL]], ptr @g2, align 8 +; MDEP-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8 +; MDEP-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8 +; MDEP-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]] +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ] +; MDEP-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ] +; MDEP-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ] +; MDEP-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ] +; MDEP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]] +; MDEP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]] +; MDEP-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test2( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]] +; MSSA-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 +; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]] +; MSSA-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8 +; MSSA-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]] +; MSSA-NEXT: store i64 [[MUL]], ptr @g1, align 8 +; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3 +; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo() +; MSSA-NEXT: store i64 [[CALL]], ptr @g2, align 8 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY:%.*]] ] +; MSSA-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]] +; MSSA-NEXT: [[T2:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 +; MSSA-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]] +; MSSA-NEXT: [[T3:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8 +; MSSA-NEXT: [[MUL5:%.*]] = mul nsw i64 [[T3]], [[T2]] +; MSSA-NEXT: store i64 [[MUL5]], ptr @g3, align 8 +; MSSA-NEXT: ret void ; entry: %arrayidx = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 %i @@ -252,29 +277,50 @@ if.end3: ; preds = %if.then2, %if.else, ; available in if.then. Check that we correctly phi-translate to the phi that ; the load has been replaced with. 
define void @test6(ptr %ptr, i1 %arg) { -; CHECK-LABEL: @test6( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1 -; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4 -; CHECK-NEXT: br label [[WHILE:%.*]] -; CHECK: while: -; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ] -; CHECK-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ] -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]] -; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]] -; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] -; CHECK: if.then: -; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ] -; CHECK-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]] -; CHECK: while.end: -; CHECK-NEXT: ret void +; MDEP-LABEL: @test6( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1 +; MDEP-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4 +; MDEP-NEXT: br label [[WHILE:%.*]] +; MDEP: while: +; MDEP-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ] +; MDEP-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ] +; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]] +; MDEP-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 +; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]] +; MDEP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]] +; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] +; MDEP: if.then: +; MDEP-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4 +; MDEP-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ] +; MDEP-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]] +; MDEP: while.end: +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test6( +; MSSA-NEXT: entry: +; MSSA-NEXT: br label [[WHILE:%.*]] +; MSSA: while: +; MSSA-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ] +; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[I]] +; MSSA-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 +; MSSA-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 +; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]] +; MSSA-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]] +; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] +; MSSA: if.then: +; MSSA-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4 +; MSSA-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: br i1 [[ARG:%.*]], label 
[[WHILE_END:%.*]], label [[WHILE]] +; MSSA: while.end: +; MSSA-NEXT: ret void ; entry: br label %while @@ -304,24 +350,40 @@ while.end: ; Load from arrayidx2 is partially redundant, check that address translation can ; fold sext + trunc across phi node together. define i32 @test7(ptr noalias %ptr1, ptr noalias %ptr2, i32 %i, i1 %cond) { -; CHECK-LABEL: @test7( -; CHECK-NEXT: entry: -; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]] -; CHECK: entry.if.end_crit_edge: -; CHECK-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4 -; CHECK-NEXT: br label [[IF_END:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]] -; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4 -; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ] -; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ] -; CHECK-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32 -; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]] -; CHECK-NEXT: ret i32 [[RES]] +; MDEP-LABEL: @test7( +; MDEP-NEXT: entry: +; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]] +; MDEP: entry.if.end_crit_edge: +; MDEP-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4 +; MDEP-NEXT: br label [[IF_END:%.*]] +; MDEP: if.then: +; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]] +; MDEP-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; MDEP-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4 +; MDEP-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ] +; MDEP-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ] +; MDEP-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32 +; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]] +; MDEP-NEXT: ret i32 [[RES]] +; +; MSSA-LABEL: @test7( +; MSSA-NEXT: entry: +; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1:%.*]], i32 [[I:%.*]] +; MSSA-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; MSSA-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4 +; MSSA-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IDX_EXT]], [[IF_THEN]] ] +; MSSA-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32 +; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]] +; MSSA-NEXT: [[RES:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; MSSA-NEXT: ret i32 [[RES]] ; entry: br i1 %cond, label %if.then, label %if.end diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll index ea43307..cb05a8e 100644 --- a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll +++ b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll @@ -1,21 +1,35 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py -; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON -; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF +; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MDEP +; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MSSA +; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MDEP +; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MSSA ; Test that phi translation is able to hoist a load whose address ; depends on an add also being hoisted. define double @phi_translation_hoists_add(ptr %a, i64 %idx) { -; ADD-TRANS-ON-LABEL: @phi_translation_hoists_add( -; ADD-TRANS-ON-NEXT: entry: -; ADD-TRANS-ON-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1 -; ADD-TRANS-ON-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]] -; ADD-TRANS-ON-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8 -; ADD-TRANS-ON-NEXT: br label [[FOR_BODY:%.*]] -; ADD-TRANS-ON: for.body: -; ADD-TRANS-ON-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00 -; ADD-TRANS-ON-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]] -; ADD-TRANS-ON: exit: -; ADD-TRANS-ON-NEXT: ret double [[LOAD_PRE]] +; PT-ON-MDEP-LABEL: @phi_translation_hoists_add( +; PT-ON-MDEP-NEXT: entry: +; PT-ON-MDEP-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1 +; PT-ON-MDEP-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]] +; PT-ON-MDEP-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8 +; PT-ON-MDEP-NEXT: br label [[FOR_BODY:%.*]] +; PT-ON-MDEP: for.body: +; PT-ON-MDEP-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00 +; PT-ON-MDEP-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]] +; PT-ON-MDEP: exit: +; PT-ON-MDEP-NEXT: ret double [[LOAD_PRE]] +; +; PT-ON-MSSA-LABEL: @phi_translation_hoists_add( +; PT-ON-MSSA-NEXT: entry: +; PT-ON-MSSA-NEXT: br label [[FOR_BODY:%.*]] +; PT-ON-MSSA: for.body: +; PT-ON-MSSA-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1 +; PT-ON-MSSA-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD]] +; PT-ON-MSSA-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8 +; PT-ON-MSSA-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD]], 1.000000e+00 +; PT-ON-MSSA-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]] +; PT-ON-MSSA: exit: +; PT-ON-MSSA-NEXT: ret double [[LOAD]] ; ; ADD-TRANS-OFF-LABEL: @phi_translation_hoists_add( ; ADD-TRANS-OFF-NEXT: entry: @@ -42,3 +56,8 @@ for.body: ; preds = %for.body, %entry exit: ret double %load } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; ADD-TRANS-ON: {{.*}} +; CHECK: {{.*}} +; PT-OFF-MDEP: {{.*}} +; PT-OFF-MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate.ll b/llvm/test/Transforms/GVN/PRE/phi-translate.ll index 713f012..084c449 100644 --- a/llvm/test/Transforms/GVN/PRE/phi-translate.ll +++ b/llvm/test/Transforms/GVN/PRE/phi-translate.ll @@ -1,23 +1,53 @@ -; RUN: opt -passes=gvn -S < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA target datalayout = "e-p:64:64:64" -; CHECK-LABEL: @foo( -; CHECK: entry.end_crit_edge: -; CHECK: %[[INDEX:[a-z0-9.]+]] = sext i32 %x to i64{{$}} -; CHECK: %[[ADDRESS:[a-z0-9.]+]] = getelementptr [100 x i32], ptr @G, i64 0, i64 %[[INDEX]]{{$}} -; CHECK: %n.pre = load i32, ptr %[[ADDRESS]], align 4, !dbg [[N_LOC:![0-9]+]] -; CHECK: br label %end -; CHECK: then: -; CHECK: store i32 %z -; CHECK: end: -; CHECK: %n = phi i32 [ %n.pre, %entry.end_crit_edge ], [ %z, %then ], !dbg [[N_LOC]] -; CHECK: ret i32 %n -; CHECK: [[N_LOC]] = !DILocation(line: 47, column: 1, scope: !{{.*}}) @G = external global [100 x i32] define i32 @foo(i32 %x, i32 %z) !dbg !6 { +; MDEP-LABEL: define i32 @foo( +; MDEP-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]] +; MDEP-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]] +; MDEP: [[ENTRY_END_CRIT_EDGE]]: +; MDEP-NEXT: [[J_PHI_TRANS_INSERT:%.*]] = sext i32 [[X]] to i64 +; MDEP-NEXT: [[Q_PHI_TRANS_INSERT:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PHI_TRANS_INSERT]] +; MDEP-NEXT: [[N_PRE:%.*]] = load i32, ptr [[Q_PHI_TRANS_INSERT]], align 4, !dbg [[DBG9:![0-9]+]] +; MDEP-NEXT: br label %[[END:.*]], !dbg [[DBG8]] +; MDEP: [[THEN]]: +; MDEP-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]] +; MDEP-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]] +; MDEP-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]] +; MDEP-NEXT: br label %[[END]], !dbg [[DBG10]] +; MDEP: [[END]]: +; MDEP-NEXT: [[J_PRE_PHI:%.*]] = phi i64 [ [[J_PHI_TRANS_INSERT]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG11:![0-9]+]] +; MDEP-NEXT: [[N:%.*]] = phi i32 [ [[N_PRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[Z]], %[[THEN]] ], !dbg [[DBG9]] +; MDEP-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG12:![0-9]+]] +; MDEP-NEXT: ret i32 [[N]], !dbg [[DBG9]] +; +; MSSA-LABEL: define i32 @foo( +; MSSA-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]] +; MSSA-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]] +; MSSA: [[ENTRY_END_CRIT_EDGE]]: +; MSSA-NEXT: [[DOTPRE:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG9:![0-9]+]] +; MSSA-NEXT: br label %[[END:.*]], !dbg [[DBG8]] +; MSSA: [[THEN]]: +; MSSA-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]] +; MSSA-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]] +; MSSA-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]] +; MSSA-NEXT: br label %[[END]], !dbg [[DBG10]] +; MSSA: [[END]]: +; MSSA-NEXT: [[J_PRE_PHI:%.*]] = phi i64 
[ [[DOTPRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG9]] +; MSSA-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG11:![0-9]+]] +; MSSA-NEXT: [[N:%.*]] = load i32, ptr [[Q]], align 4, !dbg [[DBG12:![0-9]+]] +; MSSA-NEXT: ret i32 [[N]], !dbg [[DBG12]] +; entry: %tobool = icmp eq i32 %z, 0, !dbg !7 br i1 %tobool, label %end, label %then, !dbg !7 @@ -51,6 +81,31 @@ end: !10 = !DILocation(line: 46, column: 1, scope: !6) !11 = !DILocation(line: 47, column: 1, scope: !6) !12 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang", - file: !5, - isOptimized: true, flags: "-O2", - splitDebugFilename: "abc.debug", emissionKind: 2) + file: !5, + isOptimized: true, flags: "-O2", + splitDebugFilename: "abc.debug", emissionKind: 2) +;. +; MDEP: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly) +; MDEP: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}}) +; MDEP: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]]) +; MDEP: [[META6]] = !DISubroutineType(types: [[META7]]) +; MDEP: [[META7]] = !{} +; MDEP: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]]) +; MDEP: [[DBG9]] = !DILocation(line: 47, column: 1, scope: [[DBG5]]) +; MDEP: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]]) +; MDEP: [[DBG11]] = !DILocation(line: 45, column: 1, scope: [[DBG5]]) +; MDEP: [[DBG12]] = !DILocation(line: 46, column: 1, scope: [[DBG5]]) +;. +; MSSA: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly) +; MSSA: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}}) +; MSSA: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]]) +; MSSA: [[META6]] = !DISubroutineType(types: [[META7]]) +; MSSA: [[META7]] = !{} +; MSSA: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]]) +; MSSA: [[DBG9]] = !DILocation(line: 45, column: 1, scope: [[DBG5]]) +; MSSA: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]]) +; MSSA: [[DBG11]] = !DILocation(line: 46, column: 1, scope: [[DBG5]]) +; MSSA: [[DBG12]] = !DILocation(line: 47, column: 1, scope: [[DBG5]]) +;. +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll index 9ca3e1b..60611a0 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s +; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA declare void @side_effect_0() nofree @@ -102,25 +103,45 @@ exit: } define i32 @test_03(ptr %p) { -; CHECK-LABEL: @test_03( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: br label [[LOOP:%.*]] -; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ] -; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100 -; CHECK-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]] -; CHECK: hot_path: -; CHECK-NEXT: br label [[BACKEDGE]] -; CHECK: cold_path: -; CHECK-NEXT: call void @no_side_effect() -; CHECK-NEXT: br label [[BACKEDGE]] -; CHECK: backedge: -; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]] -; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]] -; CHECK: exit: -; CHECK-NEXT: ret i32 [[X_PRE]] +; MDEP-LABEL: @test_03( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 +; MDEP-NEXT: br label [[LOOP:%.*]] +; MDEP: loop: +; MDEP-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ] +; MDEP-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100 +; MDEP-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]] +; MDEP: hot_path: +; MDEP-NEXT: br label [[BACKEDGE]] +; MDEP: cold_path: +; MDEP-NEXT: call void @no_side_effect() +; MDEP-NEXT: br label [[BACKEDGE]] +; MDEP: backedge: +; MDEP-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]] +; MDEP-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000 +; MDEP-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]] +; MDEP: exit: +; MDEP-NEXT: ret i32 [[X_PRE]] +; +; MSSA-LABEL: @test_03( +; MSSA-NEXT: entry: +; MSSA-NEXT: br label [[LOOP:%.*]] +; MSSA: loop: +; MSSA-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ] +; MSSA-NEXT: [[X:%.*]] = load i32, ptr [[P:%.*]], align 4 +; MSSA-NEXT: [[COND:%.*]] = icmp ult i32 [[X]], 100 +; MSSA-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]] +; MSSA: hot_path: +; MSSA-NEXT: br label [[BACKEDGE]] +; MSSA: cold_path: +; MSSA-NEXT: call void @no_side_effect() +; MSSA-NEXT: br label [[BACKEDGE]] +; MSSA: backedge: +; MSSA-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]] +; MSSA-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000 +; MSSA-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]] +; MSSA: exit: +; MSSA-NEXT: ret i32 [[X]] ; entry: br label %loop diff --git a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll index f099ddc..9bf6496 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll @@ -1,33 +1,53 @@ -; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s +; NOTE: 
Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA ; RUN: opt < %s -passes="gvn<pre>" -enable-pre=false -S | FileCheck %s @H = common global i32 0 ; <ptr> [#uses=2] @G = common global i32 0 ; <ptr> [#uses=1] define i32 @test() nounwind { +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @H, align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call i32 (...) @foo() #[[ATTR0]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[BB:.*]], label %[[ENTRY_BB1_CRIT_EDGE:.*]] +; CHECK: [[ENTRY_BB1_CRIT_EDGE]]: +; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[TMP0]], 42 +; CHECK-NEXT: br label %[[BB1:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP0]], 42 +; CHECK-NEXT: store i32 [[TMP3]], ptr @G, align 4 +; CHECK-NEXT: br label %[[BB1]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[ENTRY_BB1_CRIT_EDGE]] ], [ [[TMP3]], %[[BB]] ] +; CHECK-NEXT: store i32 [[DOTPRE_PHI]], ptr @H, align 4 +; CHECK-NEXT: ret i32 0 +; entry: - %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2] - %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1] - %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1] - br i1 %2, label %bb, label %bb1 + %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2] + %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1] + %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1] + br i1 %2, label %bb, label %bb1 bb: ; preds = %entry - %3 = add i32 %0, 42 ; <i32> [#uses=1] -; CHECK: %.pre = add i32 %0, 42 - store i32 %3, ptr @G, align 4 - br label %bb1 + %3 = add i32 %0, 42 ; <i32> [#uses=1] + store i32 %3, ptr @G, align 4 + br label %bb1 bb1: ; preds = %bb, %entry - %4 = add i32 %0, 42 ; <i32> [#uses=1] - store i32 %4, ptr @H, align 4 - br label %return + %4 = add i32 %0, 42 ; <i32> [#uses=1] + store i32 %4, ptr @H, align 4 + br label %return -; CHECK: %.pre-phi = phi i32 [ %.pre, %entry.bb1_crit_edge ], [ %3, %bb ] -; CHECK-NEXT: store i32 %.pre-phi, ptr @H, align 4 -; CHECK-NEXT: ret i32 0 return: ; preds = %bb1 - ret i32 0 + ret i32 0 } declare i32 @foo(...) +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; MDEP: {{.*}} +; MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll index 95f8f3f..f62d06d 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll @@ -1,16 +1,33 @@ -; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>',jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA @H = common global i32 0 @G = common global i32 0 define i32 @test(i1 %cond, i32 %v) nounwind { -; CHECK-LABEL: @test +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[MERGE:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: store i32 -1, ptr @G, align 4 +; CHECK-NEXT: br label %[[MERGE]] +; CHECK: [[MERGE]]: +; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[V]], -1 +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD_2]], 0 +; CHECK-NEXT: br i1 [[CMP]], label %[[ACTION:.*]], label %[[RETURN:.*]] +; CHECK: [[ACTION]]: +; CHECK-NEXT: store i32 [[ADD_2]], ptr @H, align 4 +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[RETURN]]: +; CHECK-NEXT: [[P:%.*]] = phi i32 [ 0, %[[MERGE]] ], [ 1, %[[ACTION]] ] +; CHECK-NEXT: ret i32 [[P]] +; entry: br i1 %cond, label %bb, label %bb1 bb: -; CHECK: store -; CHECK-NOT: br label %return %add.1 = add nuw nsw i32 %v, -1 store i32 %add.1, ptr @G, align 4 br label %merge @@ -24,8 +41,6 @@ merge: br i1 %cmp, label %action, label %return action: -; CHECK: store -; CHECK-NEXT: br label %return store i32 %add.2, ptr @H, align 4 br label %return @@ -34,3 +49,6 @@ return: ret i32 %p } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; MDEP: {{.*}} +; MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll index 8c020fd..f961f23 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll @@ -1,4 +1,6 @@ -; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MSSA ; Debug information should not impact gvn. The following two functions have same ; code except debug information. 
They should generate same optimized @@ -11,13 +13,80 @@ @h = global %struct.a zeroinitializer, align 1 define void @withdbg() { -; CHECK-LABEL: @withdbg -; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1 -; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1 -; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]] -; CHECK: [[BLOCK1]]: -; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32 -; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1 +; MDEP-LABEL: define void @withdbg() { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1 +; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1 +; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1 +; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]] +; MDEP: [[LOR_RHS]]: +; MDEP-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]]) +; MDEP-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]]) +; MDEP-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]]) +; MDEP-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]]) +; MDEP-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]]) +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: br label %[[LOR_END]] +; MDEP: [[LOR_END]]: +; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32 +; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: define void @withdbg() { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1 +; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]] +; MSSA: 
[[LOR_RHS]]: +; MSSA-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]]) +; MSSA-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]]) +; MSSA-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]]) +; MSSA-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]]) +; MSSA-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]]) +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1 +; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1 +; MSSA-NEXT: br label %[[LOR_END]] +; MSSA: [[LOR_END]]: +; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1 +; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32 +; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1 +; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1 +; MSSA-NEXT: ret void +; entry: %agg.tmp.ensured.sroa.0.i = alloca i16, align 1 @@ -61,13 +130,70 @@ lor.end: ; preds = %lor.rhs, %entry } define void @lessdbg() { -; CHECK-LABEL: @lessdbg -; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1 -; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1 -; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]] -; CHECK: [[BLOCK1]]: -; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32 -; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1 +; MDEP-LABEL: define void @lessdbg() { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1 +; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1 +; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1 +; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label 
%[[LOR_RHS:.*]] +; MDEP: [[LOR_RHS]]: +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1 +; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MDEP-NEXT: br label %[[LOR_END]] +; MDEP: [[LOR_END]]: +; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32 +; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: define void @lessdbg() { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1 +; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]] +; MSSA: [[LOR_RHS]]: +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr 
[[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1 +; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1 +; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1 +; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1 +; MSSA-NEXT: br label %[[LOR_END]] +; MSSA: [[LOR_END]]: +; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1 +; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32 +; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1 +; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1 +; MSSA-NEXT: ret void +; entry: %agg.tmp.ensured.sroa.0.i = alloca i16, align 1 @@ -126,3 +252,34 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) !48 = !DILocalVariable(name: "v", scope: !41, file: !1, line: 15, type: !5) !49 = !DILocalVariable(name: "d", scope: !41, file: !1, line: 15, type: !5) !50 = !DILocalVariable(name: "u", scope: !41, file: !1, line: 16, type: !5) +;. +; MDEP: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None) +; MDEP: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}}) +; MDEP: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]]) +; MDEP: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]]) +; MDEP: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]]) +; MDEP: [[META7]] = !{[[META8]]} +; MDEP: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed) +; MDEP: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]} +; MDEP: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MDEP: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MDEP: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MDEP: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]]) +; MDEP: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]]) +;. 
+; MSSA: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None) +; MSSA: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}}) +; MSSA: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]]) +; MSSA: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]]) +; MSSA: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]]) +; MSSA: [[META7]] = !{[[META8]]} +; MSSA: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed) +; MSSA: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]} +; MSSA: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MSSA: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MSSA: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]]) +; MSSA: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]]) +; MSSA: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]]) +;. +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll index 1ca907d..ca1852f 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll @@ -1,4 +1,6 @@ -; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" declare void @llvm.experimental.guard(i1, ...) @@ -8,20 +10,33 @@ declare void @llvm.experimental.guard(i1, ...) ; the element in this case and deoptimize otherwise. If we hoist the load to a ; place above the guard, it will may lead to out-of-bound array access. 
define i32 @test_motivation(ptr %p, ptr %q, i1 %C, i32 %index, i32 %len) { -; CHECK-LABEL: @test_motivation( +; CHECK-LABEL: define i32 @test_motivation( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i32 [[INDEX:%.*]], i32 [[LEN:%.*]]) { +; CHECK-NEXT: [[BLOCK1:.*:]] +; CHECK-NEXT: [[EL1:%.*]] = getelementptr inbounds i32, ptr [[Q]], i32 [[INDEX]] +; CHECK-NEXT: [[EL2:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 [[INDEX]] +; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK4:.*]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: store i32 0, ptr [[EL1]], align 4 +; CHECK-NEXT: br label %[[BLOCK4]] +; CHECK: [[BLOCK4]]: +; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[EL2]], %[[BLOCK3]] ], [ [[EL1]], %[[BLOCK2]] ] +; CHECK-NEXT: [[COND1:%.*]] = icmp sge i32 [[INDEX]], 0 +; CHECK-NEXT: [[COND2:%.*]] = icmp slt i32 [[INDEX]], [[LEN]] +; CHECK-NEXT: [[IN_BOUNDS:%.*]] = and i1 [[COND1]], [[COND2]] +; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[IN_BOUNDS]]) [ "deopt"() ] +; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4 +; CHECK-NEXT: ret i32 [[PRE]] +; block1: %el1 = getelementptr inbounds i32, ptr %q, i32 %index %el2 = getelementptr inbounds i32, ptr %p, i32 %index - br i1 %C, label %block2, label %block3 + br i1 %C, label %block2, label %block3 block2: -; CHECK: block2: -; CHECK-NEXT: br -; CHECK-NOT: load -; CHECK-NOT: sge -; CHECK-NOT: slt -; CHECK-NOT: and br label %block4 block3: @@ -30,13 +45,6 @@ block3: block4: -; CHECK: block4: -; CHECK: %cond1 = icmp sge i32 %index, 0 -; CHECK-NEXT: %cond2 = icmp slt i32 %index, %len -; CHECK-NEXT: %in.bounds = and i1 %cond1, %cond2 -; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %in.bounds) -; CHECK-NEXT: %PRE = load i32, ptr %P2 -; CHECK: ret i32 %PRE %P2 = phi ptr [%el2, %block3], [%el1, %block2] %cond1 = icmp sge i32 %index, 0 @@ -49,17 +57,28 @@ block4: ; Guard in load's block that is above the load should prohibit the PRE. define i32 @test_guard_01(ptr %p, ptr %q, i1 %C, i1 %G) { -; CHECK-LABEL: @test_guard_01( +; CHECK-LABEL: define i32 @test_guard_01( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) { +; CHECK-NEXT: [[BLOCK1:.*:]] +; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK4:.*]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: store i32 0, ptr [[P]], align 4 +; CHECK-NEXT: br label %[[BLOCK4]] +; CHECK: [[BLOCK4]]: +; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ] +; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ] +; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4 +; CHECK-NEXT: ret i32 [[PRE]] +; block1: - br i1 %C, label %block2, label %block3 + br i1 %C, label %block2, label %block3 block2: -; CHECK: block2: -; CHECK-NEXT: br -; CHECK-NOT: load - br label %block4 + br label %block4 block3: store i32 0, ptr %p @@ -67,10 +86,6 @@ block3: block4: -; CHECK: block4: -; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %G) -; CHECK-NEXT: load -; CHECK: ret i32 %P2 = phi ptr [%p, %block3], [%q, %block2] call void (i1, ...) @llvm.experimental.guard(i1 %G) [ "deopt"() ] @@ -80,16 +95,44 @@ block4: ; Guard in load's block that is below the load should not prohibit the PRE. 
define i32 @test_guard_02(ptr %p, ptr %q, i1 %C, i1 %G) { -; CHECK-LABEL: @test_guard_02( +; MDEP-LABEL: define i32 @test_guard_02( +; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) { +; MDEP-NEXT: [[BLOCK1:.*:]] +; MDEP-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; MDEP: [[BLOCK2]]: +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q]], align 4 +; MDEP-NEXT: br label %[[BLOCK4:.*]] +; MDEP: [[BLOCK3]]: +; MDEP-NEXT: store i32 0, ptr [[P]], align 4 +; MDEP-NEXT: br label %[[BLOCK4]] +; MDEP: [[BLOCK4]]: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, %[[BLOCK3]] ], [ [[PRE_PRE]], %[[BLOCK2]] ] +; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ] +; MDEP-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ] +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: define i32 @test_guard_02( +; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) { +; MSSA-NEXT: [[BLOCK1:.*:]] +; MSSA-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; MSSA: [[BLOCK2]]: +; MSSA-NEXT: br label %[[BLOCK4:.*]] +; MSSA: [[BLOCK3]]: +; MSSA-NEXT: store i32 0, ptr [[P]], align 4 +; MSSA-NEXT: br label %[[BLOCK4]] +; MSSA: [[BLOCK4]]: +; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ] +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4 +; MSSA-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ] +; MSSA-NEXT: ret i32 [[PRE]] +; block1: - br i1 %C, label %block2, label %block3 + br i1 %C, label %block2, label %block3 block2: -; CHECK: block2: -; CHECK-NEXT: load i32, ptr %q - br label %block4 + br label %block4 block3: store i32 0, ptr %p @@ -97,12 +140,6 @@ block3: block4: -; CHECK: block4: -; CHECK-NEXT: phi i32 [ -; CHECK-NEXT: phi ptr [ -; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G) -; CHECK-NOT: load -; CHECK: ret i32 %P2 = phi ptr [%p, %block3], [%q, %block2] %PRE = load i32, ptr %P2 @@ -112,17 +149,28 @@ block4: ; Guard above the load's block should prevent PRE from hoisting through it. define i32 @test_guard_03(ptr %p, ptr %q, i1 %C, i1 %G) { -; CHECK-LABEL: @test_guard_03( +; CHECK-LABEL: define i32 @test_guard_03( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) { +; CHECK-NEXT: [[BLOCK1:.*:]] +; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: br label %[[BLOCK4:.*]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: store i32 0, ptr [[P]], align 4 +; CHECK-NEXT: br label %[[BLOCK4]] +; CHECK: [[BLOCK4]]: +; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ] +; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ] +; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4 +; CHECK-NEXT: ret i32 [[PRE]] +; block1: - br i1 %C, label %block2, label %block3 + br i1 %C, label %block2, label %block3 block2: -; CHECK: block2: -; CHECK-NEXT: br -; CHECK-NOT: load - br label %block4 + br label %block4 block3: store i32 0, ptr %p @@ -130,11 +178,6 @@ block3: block4: -; CHECK: block4: -; CHECK-NEXT: phi ptr -; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G) -; CHECK-NEXT: load -; CHECK-NEXT: ret i32 %P2 = phi ptr [%p, %block3], [%q, %block2] call void (i1, ...) 
@llvm.experimental.guard(i1 %G) [ "deopt"() ] diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll index 0585781..17fbc0e 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll @@ -1,4 +1,6 @@ -; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt -S -passes='gvn<memoryssa>' -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MSSA target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" @@ -9,18 +11,28 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" declare i32 @foo(i32 %arg) #0 define hidden void @test_01(i32 %x, i32 %y) { - ; c2 only throws if c1 throws, so it can be safely removed and then PRE can ; hoist the load out of loop. - -; CHECK-LABEL: @test_01 -; CHECK: entry: -; CHECK-NEXT: %c1 = call i32 @foo(i32 %x) -; CHECK-NEXT: %val.pre = load i32, ptr null, align 8 -; CHECK-NEXT: br label %loop -; CHECK: loop: -; CHECK-NEXT: %c3 = call i32 @foo(i32 %val.pre) -; CHECK-NEXT: br label %loop +; MDEP-LABEL: define hidden void @test_01( +; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8 +; MDEP-NEXT: br label %[[LOOP:.*]] +; MDEP: [[LOOP]]: +; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL_PRE]]) +; MDEP-NEXT: br label %[[LOOP]] +; +; MSSA-LABEL: define hidden void @test_01( +; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; MSSA-NEXT: br label %[[LOOP:.*]] +; MSSA: [[LOOP]]: +; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8 +; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]]) +; MSSA-NEXT: br label %[[LOOP]] +; entry: %c1 = call i32 @foo(i32 %x) @@ -34,18 +46,18 @@ loop: } define hidden void @test_02(i32 %x, i32 %y) { - ; PRE is not allowed because c2 may throw. - -; CHECK-LABEL: @test_02 -; CHECK: entry: -; CHECK-NEXT: %c1 = call i32 @foo(i32 %x) -; CHECK-NEXT: br label %loop -; CHECK: loop: -; CHECK-NEXT: %c2 = call i32 @foo(i32 %y) -; CHECK-NEXT: %val = load i32, ptr null, align 8 -; CHECK-NEXT: %c3 = call i32 @foo(i32 %val) -; CHECK-NEXT: br label %loop +; CHECK-LABEL: define hidden void @test_02( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[C2:%.*]] = call i32 @foo(i32 [[Y]]) +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8 +; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]]) +; CHECK-NEXT: br label %[[LOOP]] +; entry: %c1 = call i32 @foo(i32 %x) @@ -59,19 +71,31 @@ loop: } define hidden void @test_03(i32 %x, i32 %y) { - ; PRE of load is allowed because c2 only throws if c1 throws. c3 should ; not be eliminated. c4 is eliminated because it only throws if c3 throws. 
- -; CHECK-LABEL: @test_03 -; CHECK: entry: -; CHECK-NEXT: %c1 = call i32 @foo(i32 %x) -; CHECK-NEXT: %val.pre = load i32, ptr null, align 8 -; CHECK-NEXT: br label %loop -; CHECK: loop: -; CHECK-NEXT: %c3 = call i32 @foo(i32 %y) -; CHECK-NEXT: %c5 = call i32 @foo(i32 %val.pre) -; CHECK-NEXT: br label %loop +; MDEP-LABEL: define hidden void @test_03( +; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8 +; MDEP-NEXT: br label %[[LOOP:.*]] +; MDEP: [[LOOP]]: +; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]]) +; MDEP-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL_PRE]]) +; MDEP-NEXT: br label %[[LOOP]] +; +; MSSA-LABEL: define hidden void @test_03( +; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; MSSA-NEXT: br label %[[LOOP:.*]] +; MSSA: [[LOOP]]: +; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8 +; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]]) +; MSSA-NEXT: [[VAL2:%.*]] = load i32, ptr null, align 8 +; MSSA-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]]) +; MSSA-NEXT: br label %[[LOOP]] +; entry: %c1 = call i32 @foo(i32 %x) @@ -88,18 +112,18 @@ loop: } define hidden void @test_04(i32 %x, i32 %y) { - ; PRE is not allowed even after we remove c2 because now c3 prevents us from it. - -; CHECK-LABEL: @test_04 -; CHECK: entry: -; CHECK-NEXT: %c1 = call i32 @foo(i32 %x) -; CHECK-NEXT: br label %loop -; CHECK: loop: -; CHECK-NEXT: %c3 = call i32 @foo(i32 %y) -; CHECK-NEXT: %val = load i32, ptr null, align 8 -; CHECK-NEXT: %c5 = call i32 @foo(i32 %val) -; CHECK-NEXT: br label %loop +; CHECK-LABEL: define hidden void @test_04( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]]) +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]]) +; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8 +; CHECK-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]]) +; CHECK-NEXT: br label %[[LOOP]] +; entry: %c1 = call i32 @foo(i32 %x) diff --git a/llvm/test/Transforms/GVN/PRE/pre-load.ll b/llvm/test/Transforms/GVN/PRE/pre-load.ll index bbd20bc..5a07f9f 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-load.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-load.ll @@ -1,21 +1,34 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s +; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA ; RUN: opt < %s -aa-pipeline=basic-aa -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" define i32 @test1(ptr %p, i1 %C) { -; CHECK-LABEL: @test1( -; CHECK-NEXT: block1: -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: store i32 0, ptr [[P]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] -; CHECK-NEXT: ret i32 [[PRE]] +; MDEP-LABEL: 
@test1( +; MDEP-NEXT: block1: +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: store i32 0, ptr [[P]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: @test1( +; MSSA-NEXT: block1: +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: br label [[BLOCK4:%.*]] +; MSSA: block3: +; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4 +; MSSA-NEXT: ret i32 [[PRE]] ; block1: br i1 %C, label %block2, label %block3 @@ -34,19 +47,32 @@ block4: ; This is a simple phi translation case. define i32 @test2(ptr %p, ptr %q, i1 %C) { -; CHECK-LABEL: @test2( -; CHECK-NEXT: block1: -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] -; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] -; CHECK-NEXT: ret i32 [[PRE]] +; MDEP-LABEL: @test2( +; MDEP-NEXT: block1: +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: store i32 0, ptr [[P:%.*]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] +; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: @test2( +; MSSA-NEXT: block1: +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: br label [[BLOCK4:%.*]] +; MSSA: block3: +; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ] +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4 +; MSSA-NEXT: ret i32 [[PRE]] ; block1: br i1 %C, label %block2, label %block3 @@ -66,23 +92,40 @@ block4: ; This is a PRE case that requires phi translation through a GEP. 
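
Phi translation is the step that re-expresses the load's address in each predecessor: in block4 the address is a GEP off the phi %P2, and translating it into block2 substitutes the incoming value %q, yielding a GEP off %q where the hoisted load can live. A reduced runnable module with this shape (hand-written; it mirrors the hunk below):

define i32 @phi_translate_sketch(ptr %p, ptr %q, i1 %C) {
block1:
  br i1 %C, label %block2, label %block3

block2:
  ; phi translation of %P3 into this block gives
  ; (getelementptr i32, ptr %q, i32 1), the spot for the PRE'd load
  br label %block4

block3:
  %A = getelementptr i32, ptr %p, i32 1
  store i32 0, ptr %A
  br label %block4

block4:
  %P2 = phi ptr [ %p, %block3 ], [ %q, %block2 ]
  %P3 = getelementptr i32, ptr %P2, i32 1
  %v = load i32, ptr %P3
  ret i32 %v
}

Running opt -passes=gvn -enable-load-pre -S over this should produce the MDEP-style output, while -passes='gvn<memoryssa>' currently leaves the load in block4, matching the MSSA checks.
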
define i32 @test3(ptr %p, ptr %q, ptr %Hack, i1 %C) { -; CHECK-LABEL: @test3( -; CHECK-NEXT: block1: -; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 -; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 -; CHECK-NEXT: store i32 0, ptr [[A]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] -; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] -; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 -; CHECK-NEXT: ret i32 [[PRE]] +; MDEP-LABEL: @test3( +; MDEP-NEXT: block1: +; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 +; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MDEP-NEXT: store i32 0, ptr [[A]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] +; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: @test3( +; MSSA-NEXT: block1: +; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 +; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: br label [[BLOCK4:%.*]] +; MSSA: block3: +; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MSSA-NEXT: store i32 0, ptr [[A]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4 +; MSSA-NEXT: ret i32 [[PRE]] ; block1: %B = getelementptr i32, ptr %q, i32 1 @@ -107,24 +150,41 @@ block4: ;; Here the loaded address is available, but the computation is in 'block3' ;; which does not dominate 'block2'. 
define i32 @test4(ptr %p, ptr %q, ptr %Hack, i1 %C) { -; CHECK-LABEL: @test4( -; CHECK-NEXT: block1: -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1 -; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 -; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 -; CHECK-NEXT: store i32 0, ptr [[A]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] -; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] -; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 -; CHECK-NEXT: ret i32 [[PRE]] +; MDEP-LABEL: @test4( +; MDEP-NEXT: block1: +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1 +; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MDEP-NEXT: store i32 0, ptr [[A]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] +; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: @test4( +; MSSA-NEXT: block1: +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: br label [[BLOCK4:%.*]] +; MSSA: block3: +; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 +; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MSSA-NEXT: store i32 0, ptr [[A]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1 +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4 +; MSSA-NEXT: ret i32 [[PRE]] ; block1: br i1 %C, label %block2, label %block3 @@ -149,24 +209,41 @@ block4: ; Same as test4, with a nuw flag on the GEP. 
define i32 @test4_nuw(ptr %p, ptr %q, ptr %Hack, i1 %C) { -; CHECK-LABEL: @test4_nuw( -; CHECK-NEXT: block1: -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1 -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1 -; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 -; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 -; CHECK-NEXT: store i32 0, ptr [[A]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] -; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] -; CHECK-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1 -; CHECK-NEXT: ret i32 [[PRE]] +; MDEP-LABEL: @test4_nuw( +; MDEP-NEXT: block1: +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1 +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1 +; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MDEP-NEXT: store i32 0, ptr [[A]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ] +; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MDEP-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1 +; MDEP-NEXT: ret i32 [[PRE]] +; +; MSSA-LABEL: @test4_nuw( +; MSSA-NEXT: block1: +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: br label [[BLOCK4:%.*]] +; MSSA: block3: +; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1 +; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8 +; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1 +; MSSA-NEXT: store i32 0, ptr [[A]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ] +; MSSA-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1 +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4 +; MSSA-NEXT: ret i32 [[PRE]] ; block1: br i1 %C, label %block2, label %block3 @@ -196,28 +273,50 @@ block4: ;} define void @test5(i32 %N, ptr nocapture %G) nounwind ssp { -; CHECK-LABEL: @test5( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 -; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] -; CHECK: bb.nph: -; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 -; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8 -; CHECK-NEXT: br label [[BB:%.*]] -; CHECK: bb: -; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ] -; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] -; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]] -; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] -; 
CHECK-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8 -; CHECK-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]] -; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] -; CHECK: return: -; CHECK-NEXT: ret void +; MDEP-LABEL: @test5( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 +; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] +; MDEP: bb.nph: +; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 +; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8 +; MDEP-NEXT: br label [[BB:%.*]] +; MDEP: bb: +; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ] +; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] +; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 +; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]] +; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] +; MDEP-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8 +; MDEP-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]] +; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8 +; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] +; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] +; MDEP: return: +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test5( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 +; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] +; MSSA: bb.nph: +; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 +; MSSA-NEXT: br label [[BB:%.*]] +; MSSA: bb: +; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] +; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 +; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]] +; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] +; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8 +; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8 +; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]] +; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8 +; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] +; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] +; MSSA: return: +; MSSA-NEXT: ret void ; entry: %0 = add i32 %N, -1 @@ -254,28 +353,50 @@ return: ;} define void @test6(i32 %N, ptr nocapture %G) nounwind ssp { -; CHECK-LABEL: @test6( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 -; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] -; CHECK: bb.nph: -; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 -; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8 -; CHECK-NEXT: br label [[BB:%.*]] -; CHECK: bb: -; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ] -; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] -; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]] -; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] -; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8 
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]] -; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] -; CHECK: return: -; CHECK-NEXT: ret void +; MDEP-LABEL: @test6( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 +; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] +; MDEP: bb.nph: +; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 +; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8 +; MDEP-NEXT: br label [[BB:%.*]] +; MDEP: bb: +; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ] +; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] +; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 +; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]] +; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] +; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8 +; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]] +; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8 +; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] +; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] +; MDEP: return: +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test6( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1 +; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]] +; MSSA: bb.nph: +; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64 +; MSSA-NEXT: br label [[BB:%.*]] +; MSSA: bb: +; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ] +; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1 +; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]] +; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]] +; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8 +; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8 +; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]] +; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8 +; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]] +; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]] +; MSSA: return: +; MSSA-NEXT: ret void ; entry: %0 = add i32 %N, -1 @@ -314,31 +435,57 @@ return: ; This requires phi translation of the adds. 
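
"Phi translation of the adds" means the address is not just a GEP off a phi: the index itself is an add of the induction variable, so translating the GEP into the preheader first translates the add (with %indvar replaced by its incoming value 0, the add folds to a constant). A reduced sketch of the chain involved (hypothetical function, simplified from test7/test9 below; whether the load is then actually PRE'd also depends on the value being available along the back edge):

define void @add_translate_sketch(ptr %G, i64 %n) {
entry:
  br label %bb

bb:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb ]
  %indvar.next = add i64 %indvar, 1
  ; translating %addr into %entry: %indvar -> 0, the add folds to 1,
  ; so the preheader copy would be (getelementptr double, ptr %G, i64 1)
  %addr = getelementptr double, ptr %G, i64 %indvar.next
  %v = load double, ptr %addr
  store double %v, ptr %G
  %done = icmp eq i64 %indvar.next, %n
  br i1 %done, label %exit, label %bb

exit:
  ret void
}
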
define void @test7(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
-; CHECK-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test7(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MDEP-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MDEP-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test7(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MSSA-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MSSA-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MSSA-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MSSA-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = getelementptr inbounds double, ptr %G, i64 1
@@ -374,22 +521,37 @@ return:

;; Here the loaded address isn't available in 'block2' at all, requiring a new
;; GEP to be inserted into it.
define i32 @test8(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test8(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test8(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -417,31 +579,55 @@ block4:

; This requires phi translation of the adds.
define void @test9(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test9(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test9(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
add i32 0, 0
@@ -482,35 +668,62 @@ return:

; PR5501
define void @test10(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
-; CHECK-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test10(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MDEP-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
+; MDEP-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
+; MDEP-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test10(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: [[TMP5:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
+; MSSA-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -547,24 +760,40 @@ return:

; Test critical edge splitting.
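
A critical edge has a source with several successors and a destination with several predecessors, so there is no existing block where an inserted load would execute exactly when the edge is taken; PRE therefore splits the edge and places the load in the new block. Hand-written rendering of the MemDep output shape for the next test (labels follow the checks; the MSSA configuration, as shown below, does not perform this PRE yet):

define i32 @split_edge_sketch(ptr %p, i1 %C, i32 %N) {
block1:
  br i1 %C, label %block2, label %block3

block2:
  %cond = icmp sgt i32 %N, 1
  ; block2 -> block4 was critical: a load placed in block2 would also
  ; run on the path to block5, so the edge gets a dedicated block
  br i1 %cond, label %block2.block4_crit_edge, label %block5

block2.block4_crit_edge:
  %PRE.pre = load i32, ptr %p
  br label %block4

block3:
  store i32 0, ptr %p
  br label %block4

block4:
  %PRE = phi i32 [ %PRE.pre, %block2.block4_crit_edge ], [ 0, %block3 ]
  br label %block5

block5:
  %ret = phi i32 [ 0, %block2 ], [ %PRE, %block4 ]
  ret i32 %ret
}
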
define i32 @test11(ptr %p, i1 %C, i32 %N) { -; CHECK-LABEL: @test11( -; CHECK-NEXT: block1: -; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] -; CHECK: block2: -; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1 -; CHECK-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]] -; CHECK: block2.block4_crit_edge: -; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: br label [[BLOCK4:%.*]] -; CHECK: block3: -; CHECK-NEXT: store i32 0, ptr [[P]], align 4 -; CHECK-NEXT: br label [[BLOCK4]] -; CHECK: block4: -; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ] -; CHECK-NEXT: br label [[BLOCK5]] -; CHECK: block5: -; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ] -; CHECK-NEXT: ret i32 [[RET]] +; MDEP-LABEL: @test11( +; MDEP-NEXT: block1: +; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MDEP: block2: +; MDEP-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1 +; MDEP-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]] +; MDEP: block2.block4_crit_edge: +; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4 +; MDEP-NEXT: br label [[BLOCK4:%.*]] +; MDEP: block3: +; MDEP-NEXT: store i32 0, ptr [[P]], align 4 +; MDEP-NEXT: br label [[BLOCK4]] +; MDEP: block4: +; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ] +; MDEP-NEXT: br label [[BLOCK5]] +; MDEP: block5: +; MDEP-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ] +; MDEP-NEXT: ret i32 [[RET]] +; +; MSSA-LABEL: @test11( +; MSSA-NEXT: block1: +; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]] +; MSSA: block2: +; MSSA-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1 +; MSSA-NEXT: br i1 [[COND]], label [[BLOCK4:%.*]], label [[BLOCK5:%.*]] +; MSSA: block3: +; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4 +; MSSA-NEXT: br label [[BLOCK4]] +; MSSA: block4: +; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4 +; MSSA-NEXT: br label [[BLOCK5]] +; MSSA: block5: +; MSSA-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ] +; MSSA-NEXT: ret i32 [[RET]] ; block1: br i1 %C, label %block2, label %block3 @@ -726,17 +955,30 @@ follow_2: ; Since it is OK to speculate, PRE is allowed. 
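
Speculation is what changes here: PRE normally refuses to add a load on a path that never executed one, since the new load could fault or race. The tests below mark the pointer dereferenceable and aligned and the function nofree/nosync, which together make the early load non-trapping and free from racing deallocation. A minimal sketch of that attribute combination (hypothetical function, modeled on test15):

define i32 @speculate_sketch(ptr noalias dereferenceable(8) align 4 %x, ptr %r, i1 %c) nofree nosync {
entry:
  ; a load of %x may be hoisted here even though only one path loads it:
  ; dereferenceable(8) + align 4 make the speculative load safe, and
  ; nofree/nosync rule out the pointee being freed concurrently
  br i1 %c, label %then, label %end

then:
  %u = load i32, ptr %x, align 4
  store i32 %u, ptr %r, align 4
  br label %end

end:
  %v = load i32, ptr %x, align 4
  ret i32 %v
}
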
define i32 @test15(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync { -; CHECK-LABEL: @test15( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 -; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4 -; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: call void @f() -; CHECK-NEXT: ret i32 [[VV_PRE]] +; MDEP-LABEL: @test15( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 +; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4 +; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MDEP: if.then: +; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: call void @f() +; MDEP-NEXT: ret i32 [[VV_PRE]] +; +; MSSA-LABEL: @test15( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 +; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4 +; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: call void @f() +; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4 +; MSSA-NEXT: ret i32 [[VV]] ; entry: @@ -763,17 +1005,30 @@ if.end: ; Since it is OK to speculate, PRE is allowed. define i32 @test16(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync { -; CHECK-LABEL: @test16( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 -; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4 -; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: call void @f() -; CHECK-NEXT: ret i32 [[VV_PRE]] +; MDEP-LABEL: @test16( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 +; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4 +; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MDEP: if.then: +; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: call void @f() +; MDEP-NEXT: ret i32 [[VV_PRE]] +; +; MSSA-LABEL: @test16( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0 +; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4 +; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: call void @f() +; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4 +; MSSA-NEXT: ret i32 [[VV]] ; entry: @@ -808,36 +1063,67 @@ declare i1 @bar() ; We can move all loads into predecessors. 
define void @test17(ptr %p1, ptr %p2, ptr %p3, ptr %p4) -; CHECK-LABEL: @test17( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 -; CHECK-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200 -; CHECK-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]] -; CHECK: bb1: -; CHECK-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100 -; CHECK-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]] -; CHECK: bb2: -; CHECK-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1 -; CHECK-NEXT: store i64 [[V2]], ptr [[P1]], align 8 -; CHECK-NEXT: br label [[BB3:%.*]] -; CHECK: bb3: -; CHECK-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ] -; CHECK-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8 -; CHECK-NEXT: ret void -; CHECK: bb100: -; CHECK-NEXT: [[COND3:%.*]] = call i1 @foo() -; CHECK-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]] -; CHECK: bb101: -; CHECK-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8 -; CHECK-NEXT: ret void -; CHECK: bb200: -; CHECK-NEXT: [[COND4:%.*]] = call i1 @bar() -; CHECK-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]] -; CHECK: bb201: -; CHECK-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8 -; CHECK-NEXT: ret void +; MDEP-LABEL: @test17( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 +; MDEP-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200 +; MDEP-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]] +; MDEP: bb1: +; MDEP-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100 +; MDEP-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]] +; MDEP: bb2: +; MDEP-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1 +; MDEP-NEXT: store i64 [[V2]], ptr [[P1]], align 8 +; MDEP-NEXT: br label [[BB3:%.*]] +; MDEP: bb3: +; MDEP-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ] +; MDEP-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8 +; MDEP-NEXT: ret void +; MDEP: bb100: +; MDEP-NEXT: [[COND3:%.*]] = call i1 @foo() +; MDEP-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]] +; MDEP: bb101: +; MDEP-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8 +; MDEP-NEXT: ret void +; MDEP: bb200: +; MDEP-NEXT: [[COND4:%.*]] = call i1 @bar() +; MDEP-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]] +; MDEP: bb201: +; MDEP-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test17( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 +; MSSA-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200 +; MSSA-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]] +; MSSA: bb1: +; MSSA-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100 +; MSSA-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]] +; MSSA: bb2: +; MSSA-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1 +; MSSA-NEXT: store i64 [[V2]], ptr [[P1]], align 8 +; MSSA-NEXT: br label [[BB3:%.*]] +; MSSA: bb3: +; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8 +; MSSA-NEXT: ret void +; MSSA: bb100: +; MSSA-NEXT: [[COND3:%.*]] = call i1 @foo() +; MSSA-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]] +; MSSA: bb101: +; MSSA-NEXT: [[V4:%.*]] = load i64, ptr [[P1]], align 
8 +; MSSA-NEXT: store i64 [[V4]], ptr [[P3:%.*]], align 8 +; MSSA-NEXT: ret void +; MSSA: bb200: +; MSSA-NEXT: [[COND4:%.*]] = call i1 @bar() +; MSSA-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]] +; MSSA: bb201: +; MSSA-NEXT: [[V5:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: store i64 [[V5]], ptr [[P4:%.*]], align 8 +; MSSA-NEXT: ret void ; { entry: @@ -882,18 +1168,31 @@ bb201: ; So ValuesPerBlock[%if.then] should not be replaced when the load instruction ; is moved to %entry. define void @test18(i1 %cond, ptr %p1, ptr %p2) { -; CHECK-LABEL: @test18( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2 -; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1 -; CHECK-NEXT: store i16 [[DEC]], ptr [[P1]], align 2 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: if.end: -; CHECK-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ] -; CHECK-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2 -; CHECK-NEXT: ret void +; MDEP-LABEL: @test18( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2 +; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MDEP: if.then: +; MDEP-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1 +; MDEP-NEXT: store i16 [[DEC]], ptr [[P1]], align 2 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: if.end: +; MDEP-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ] +; MDEP-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test18( +; MSSA-NEXT: entry: +; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[V1:%.*]] = load i16, ptr [[P1:%.*]], align 2 +; MSSA-NEXT: [[DEC:%.*]] = add i16 [[V1]], -1 +; MSSA-NEXT: store i16 [[DEC]], ptr [[P1]], align 2 +; MSSA-NEXT: br label [[IF_END]] +; MSSA: if.end: +; MSSA-NEXT: [[V2:%.*]] = load i16, ptr [[P1]], align 2 +; MSSA-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2 +; MSSA-NEXT: ret void ; entry: br i1 %cond, label %if.end, label %if.then @@ -912,32 +1211,56 @@ if.end: ; PRE of load instructions should not cross exception handling instructions. 
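
The hazard is the invoke: it terminates its block, may transfer control to the landing pad, and the callee may write the loaded location, so a PRE'd load must stay below it. The MemDep configuration satisfies this by splitting the non-exceptional edge and reloading there, as the MDEP checks for the next test show. A minimal legal placement, sketched (hypothetical function; the personality routine is a stand-in, the test itself uses @__CxxFrameHandler3):

declare void @f()
declare i32 @__gxx_personality_v0(...)

define i64 @eh_sketch(ptr %p1) personality ptr @__gxx_personality_v0 {
entry:
  invoke void @f()                ; may throw, and may store to %p1
          to label %normal unwind label %lpad

normal:
  ; legal spot for a reload: after the invoke, on the normal edge only
  %v = load i64, ptr %p1, align 8
  ret i64 %v

lpad:
  %lp = landingpad { ptr, i32 }
          cleanup
  resume { ptr, i32 } %lp
}
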
define void @test19(i1 %cond, ptr %p1, ptr %p2) -; CHECK-LABEL: @test19( -; CHECK-NEXT: entry: -; CHECK-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]] -; CHECK: then: -; CHECK-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8 -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1 -; CHECK-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8 -; CHECK-NEXT: br label [[END:%.*]] -; CHECK: else: -; CHECK-NEXT: invoke void @f() -; CHECK-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]] -; CHECK: else.end_crit_edge: -; CHECK-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: br label [[END]] -; CHECK: end: -; CHECK-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[V1]], 100 -; CHECK-NEXT: store i64 [[AND]], ptr [[P2]], align 8 -; CHECK-NEXT: ret void -; CHECK: lpad: -; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; CHECK-NEXT: cleanup -; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[V3]], 200 -; CHECK-NEXT: store i64 [[OR]], ptr [[P1]], align 8 -; CHECK-NEXT: resume { ptr, i32 } [[LP]] +; MDEP-LABEL: @test19( +; MDEP-NEXT: entry: +; MDEP-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]] +; MDEP: then: +; MDEP-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8 +; MDEP-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1 +; MDEP-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8 +; MDEP-NEXT: br label [[END:%.*]] +; MDEP: else: +; MDEP-NEXT: invoke void @f() +; MDEP-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]] +; MDEP: else.end_crit_edge: +; MDEP-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: br label [[END]] +; MDEP: end: +; MDEP-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ] +; MDEP-NEXT: [[AND:%.*]] = and i64 [[V1]], 100 +; MDEP-NEXT: store i64 [[AND]], ptr [[P2]], align 8 +; MDEP-NEXT: ret void +; MDEP: lpad: +; MDEP-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } +; MDEP-NEXT: cleanup +; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: [[OR:%.*]] = or i64 [[V3]], 200 +; MDEP-NEXT: store i64 [[OR]], ptr [[P1]], align 8 +; MDEP-NEXT: resume { ptr, i32 } [[LP]] +; +; MSSA-LABEL: @test19( +; MSSA-NEXT: entry: +; MSSA-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]] +; MSSA: then: +; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8 +; MSSA-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1 +; MSSA-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8 +; MSSA-NEXT: br label [[END:%.*]] +; MSSA: else: +; MSSA-NEXT: invoke void @f() +; MSSA-NEXT: to label [[END]] unwind label [[LPAD:%.*]] +; MSSA: end: +; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: [[AND:%.*]] = and i64 [[V1]], 100 +; MSSA-NEXT: store i64 [[AND]], ptr [[P2]], align 8 +; MSSA-NEXT: ret void +; MSSA: lpad: +; MSSA-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } +; MSSA-NEXT: cleanup +; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: [[OR:%.*]] = or i64 [[V3]], 200 +; MSSA-NEXT: store i64 [[OR]], ptr [[P1]], align 8 +; MSSA-NEXT: resume { ptr, i32 } [[LP]] ; personality ptr @__CxxFrameHandler3 { entry: @@ -1050,29 +1373,50 @@ if.end: ; Call to function @maybethrow may cause exception, so the load of %v3 can't ; be hoisted to block %if.else. 
define void @test22(i1 %cond, ptr %p1, ptr %p2) { -; CHECK-LABEL: @test22( -; CHECK-NEXT: entry: -; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] -; CHECK: if.then: -; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 -; CHECK-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1 -; CHECK-NEXT: store i64 [[DEC]], ptr [[P1]], align 8 -; CHECK-NEXT: br label [[IF_END:%.*]] -; CHECK: if.end: -; CHECK-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ] -; CHECK-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8 -; CHECK-NEXT: ret void -; CHECK: if.else: -; CHECK-NEXT: [[COND2:%.*]] = call i1 @foo() -; CHECK-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]] -; CHECK: if.else.if.end_crit_edge: -; CHECK-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: br label [[IF_END]] -; CHECK: exit: -; CHECK-NEXT: [[_:%.*]] = call i1 @maybethrow() -; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 -; CHECK-NEXT: store i64 [[V3]], ptr [[P2]], align 8 -; CHECK-NEXT: ret void +; MDEP-LABEL: @test22( +; MDEP-NEXT: entry: +; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; MDEP: if.then: +; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 +; MDEP-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1 +; MDEP-NEXT: store i64 [[DEC]], ptr [[P1]], align 8 +; MDEP-NEXT: br label [[IF_END:%.*]] +; MDEP: if.end: +; MDEP-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ] +; MDEP-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8 +; MDEP-NEXT: ret void +; MDEP: if.else: +; MDEP-NEXT: [[COND2:%.*]] = call i1 @foo() +; MDEP-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]] +; MDEP: if.else.if.end_crit_edge: +; MDEP-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: br label [[IF_END]] +; MDEP: exit: +; MDEP-NEXT: [[_:%.*]] = call i1 @maybethrow() +; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 +; MDEP-NEXT: store i64 [[V3]], ptr [[P2]], align 8 +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test22( +; MSSA-NEXT: entry: +; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; MSSA: if.then: +; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8 +; MSSA-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1 +; MSSA-NEXT: store i64 [[DEC]], ptr [[P1]], align 8 +; MSSA-NEXT: br label [[IF_END:%.*]] +; MSSA: if.end: +; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8 +; MSSA-NEXT: ret void +; MSSA: if.else: +; MSSA-NEXT: [[COND2:%.*]] = call i1 @foo() +; MSSA-NEXT: br i1 [[COND2]], label [[IF_END]], label [[EXIT:%.*]] +; MSSA: exit: +; MSSA-NEXT: [[_:%.*]] = call i1 @maybethrow() +; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8 +; MSSA-NEXT: store i64 [[V3]], ptr [[P2]], align 8 +; MSSA-NEXT: ret void ; entry: br i1 %cond, label %if.then, label %if.else @@ -1106,21 +1450,38 @@ declare void @maybethrow() readnone ; also be replaced by ValuesPerBlock(BB, NewLoad). So we'll not use the deleted ; OldLoad in later PHI instruction. 
define void @test23(i1 %cond1, i1 %cond2) { -; CHECK-LABEL: @test23( -; CHECK-NEXT: entry: -; CHECK-NEXT: [[G:%.*]] = alloca i64, align 8 -; CHECK-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8 -; CHECK-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]] -; CHECK: then: -; CHECK-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]] -; CHECK: store: -; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8 -; CHECK-NEXT: br label [[WRONG]] -; CHECK: wrong: -; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8 -; CHECK-NEXT: ret void -; CHECK: exit: -; CHECK-NEXT: ret void +; MDEP-LABEL: @test23( +; MDEP-NEXT: entry: +; MDEP-NEXT: [[G:%.*]] = alloca i64, align 8 +; MDEP-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8 +; MDEP-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]] +; MDEP: then: +; MDEP-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]] +; MDEP: store: +; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8 +; MDEP-NEXT: br label [[WRONG]] +; MDEP: wrong: +; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8 +; MDEP-NEXT: ret void +; MDEP: exit: +; MDEP-NEXT: ret void +; +; MSSA-LABEL: @test23( +; MSSA-NEXT: entry: +; MSSA-NEXT: [[G:%.*]] = alloca i64, align 8 +; MSSA-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]] +; MSSA: then: +; MSSA-NEXT: [[VAL2:%.*]] = load i64, ptr @B, align 8 +; MSSA-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]] +; MSSA: store: +; MSSA-NEXT: store i64 [[VAL2]], ptr @B, align 8 +; MSSA-NEXT: br label [[WRONG]] +; MSSA: wrong: +; MSSA-NEXT: [[VAL1:%.*]] = load i64, ptr @B, align 8 +; MSSA-NEXT: store i64 [[VAL1]], ptr [[G]], align 8 +; MSSA-NEXT: ret void +; MSSA: exit: +; MSSA-NEXT: ret void ; entry: %G = alloca i64, align 8 diff --git a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll index e16c21e..4cd2e47 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s +; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA declare void @side_effect() declare i1 @side_effect_cond() @@ -216,7 +217,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe ; CHECK-NEXT: br label [[BACKEDGE]] ; CHECK: cold_path: ; CHECK-NEXT: invoke void @side_effect() -; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]] +; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]] ; CHECK: backedge: ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]] ; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000 @@ -225,7 +226,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe ; CHECK-NEXT: ret i32 [[X]] ; CHECK: cold_exit: ; CHECK-NEXT: [[LANDING_PAD:%.*]] = landingpad token -; CHECK-NEXT: cleanup +; CHECK-NEXT: cleanup ; CHECK-NEXT: ret i32 -1 ; entry: @@ -447,7 +448,7 @@ define i32 @test_inner_loop(ptr %p, i1 %arg) { ; CHECK-NEXT: br label [[INNER_LOOP:%.*]] ; CHECK: inner_loop: ; CHECK-NEXT: call void @side_effect() -; CHECK-NEXT: br i1 %arg, label [[INNER_LOOP]], label 
[[BACKEDGE]] +; CHECK-NEXT: br i1 [[ARG:%.*]], label [[INNER_LOOP]], label [[BACKEDGE]] ; CHECK: backedge: ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]] ; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000 @@ -633,3 +634,6 @@ exit: cold_exit: ret i32 -1 } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; MDEP: {{.*}} +; MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll index 2009c29..22c628b 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll @@ -1,4 +1,6 @@ -; RUN: opt < %s -passes=gvn -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA ; This testcase tests insertion of no-cost phis. That is, ; when the value is already available in every predecessor, ; and we just need to insert a phi node to merge the available values. @@ -8,6 +10,22 @@ define i32 @mai(i32 %foo, i32 %a, i32 %b) { +; CHECK-LABEL: define i32 @mai( +; CHECK-SAME: i32 [[FOO:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[FOO]], 0 +; CHECK-NEXT: br i1 [[TMP1]], label %[[BB1:.*]], label %[[BB2:.*]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[A]], [[B]] +; CHECK-NEXT: store i32 [[TMP2]], ptr @c, align 4 +; CHECK-NEXT: br label %[[MERGEBLOCK:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[TMP3:%.*]] = add nsw i32 [[A]], [[B]] +; CHECK-NEXT: store i32 [[TMP3]], ptr @d, align 4 +; CHECK-NEXT: br label %[[MERGEBLOCK]] +; CHECK: [[MERGEBLOCK]]: +; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[TMP3]], %[[BB2]] ], [ [[TMP2]], %[[BB1]] ] +; CHECK-NEXT: ret i32 [[DOTPRE_PHI]] +; %1 = icmp ne i32 %foo, 0 br i1 %1, label %bb1, label %bb2 @@ -22,10 +40,11 @@ bb2: br label %mergeblock mergeblock: -; CHECK: pre-phi = phi i32 [ %3, %bb2 ], [ %2, %bb1 ] -; CHECK-NEXT: ret i32 %.pre-phi %4 = add nsw i32 %a, %b ret i32 %4 } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; MDEP: {{.*}} +; MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll index d17c459..32f149b 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll @@ -1,52 +1,77 @@ -; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA @H = common global i32 0 @G = common global i32 0 define i32 @test1(i1 %cond, i32 %v) nounwind { -; CHECK-LABEL: @test1 +; CHECK-LABEL: define i32 @test1( +; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42 +; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4 +; CHECK-NEXT: br label %[[RETURN:.*]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[V]], 42 +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[RETURN]]: +; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ] +; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4 +; CHECK-NEXT: ret i32 0 +; entry: - br i1 %cond, label %bb, label %bb1 + br i1 %cond, label %bb, label %bb1 bb: - %add.1 = add nuw nsw i32 %v, 42 -; CHECK: %add.1 = add i32 %v, 42 - store i32 %add.1, ptr @G, align 4 - br label %return + %add.1 = add nuw nsw i32 %v, 42 + store i32 %add.1, ptr @G, align 4 + br label %return bb1: -; CHECK: %.pre = add i32 %v, 42 - br label %return + br label %return return: -; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ] -; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4 -; CHECK-NEXT: ret i32 0 - %add.2 = add i32 %v, 42 - store i32 %add.2, ptr @H, align 4 - ret i32 0 + %add.2 = add i32 %v, 42 + store i32 %add.2, ptr @H, align 4 + ret i32 0 } define i32 @test2(i1 %cond, i32 %v) nounwind { -; CHECK-LABEL: @test2 +; CHECK-LABEL: define i32 @test2( +; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42 +; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4 +; CHECK-NEXT: br label %[[RETURN:.*]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[DOTPRE:%.*]] = add nuw nsw i32 [[V]], 42 +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[RETURN]]: +; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ] +; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4 +; CHECK-NEXT: ret i32 0 +; entry: - br i1 %cond, label %bb, label %bb1 + br i1 %cond, label %bb, label %bb1 bb: - %add.1 = add i32 %v, 42 -; CHECK: %add.1 = add i32 %v, 42 - store i32 %add.1, ptr @G, align 4 - br label %return + %add.1 = add i32 %v, 42 + store i32 %add.1, ptr @G, align 4 + br label %return bb1: -; CHECK: %.pre = add nuw nsw i32 %v, 42 - br label %return + br label %return return: -; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ] -; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4 -; CHECK-NEXT: ret i32 0 - %add.2 = add nuw nsw i32 %v, 42 - store i32 %add.2, ptr @H, align 4 - ret i32 0 + %add.2 = add nuw nsw i32 %v, 42 + store i32 %add.2, ptr @H, align 4 + 
ret i32 0 } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; MDEP: {{.*}} +; MSSA: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll index 7342925..74bc6bc 100644 --- a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll +++ b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll @@ -1,4 +1,6 @@ -; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA ; RUN: opt < %s -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s ; This testcase assumed we'll PRE the load into %for.cond, but we don't actually ; verify that doing so is safe. If there didn't _happen_ to be a load in @@ -12,35 +14,85 @@ @p = external global i32 define i32 @f(i32 %n) nounwind { +; MDEP-LABEL: define i32 @f( +; MDEP-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; MDEP-NEXT: [[ENTRY:.*]]: +; MDEP-NEXT: br label %[[FOR_COND:.*]] +; MDEP: [[FOR_COND]]: +; MDEP-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ] +; MDEP-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]] +; MDEP-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]] +; MDEP: [[FOR_COND_FOR_END_CRIT_EDGE]]: +; MDEP-NEXT: [[TMP9_PRE:%.*]] = load i32, ptr @p, align 4 +; MDEP-NEXT: br label %[[FOR_END:.*]] +; MDEP: [[FOR_BODY]]: +; MDEP-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4 +; MDEP-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1 +; MDEP-NEXT: store i32 [[DEC]], ptr @p, align 4 +; MDEP-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0 +; MDEP-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]] +; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]: +; MDEP-NEXT: br label %[[FOR_END]] +; MDEP: [[FOR_INC]]: +; MDEP-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1 +; MDEP-NEXT: br label %[[FOR_COND]] +; MDEP: [[FOR_END]]: +; MDEP-NEXT: [[TMP9:%.*]] = phi i32 [ [[DEC]], %[[FOR_BODY_FOR_END_CRIT_EDGE]] ], [ [[TMP9_PRE]], %[[FOR_COND_FOR_END_CRIT_EDGE]] ] +; MDEP-NEXT: ret i32 [[TMP9]] +; +; MSSA-LABEL: define i32 @f( +; MSSA-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; MSSA-NEXT: [[ENTRY:.*]]: +; MSSA-NEXT: br label %[[FOR_COND:.*]] +; MSSA: [[FOR_COND]]: +; MSSA-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ] +; MSSA-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]] +; MSSA-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]] +; MSSA: [[FOR_COND_FOR_END_CRIT_EDGE]]: +; MSSA-NEXT: br label %[[FOR_END:.*]] +; MSSA: [[FOR_BODY]]: +; MSSA-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4 +; MSSA-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1 +; MSSA-NEXT: store i32 [[DEC]], ptr @p, align 4 +; MSSA-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0 +; MSSA-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]] +; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]: +; MSSA-NEXT: br label %[[FOR_END]] +; MSSA: [[FOR_INC]]: +; MSSA-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1 +; MSSA-NEXT: br label %[[FOR_COND]] +; MSSA: [[FOR_END]]: +; MSSA-NEXT: [[TMP9:%.*]] = load i32, ptr @p, align 4 +; MSSA-NEXT: ret i32 [[TMP9]] +; entry: - br label %for.cond + br label %for.cond for.cond: ; preds = %for.inc, %entry - %i.0 = phi i32 [ 0, 
%entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2] - %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1] - br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge + %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2] + %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1] + br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge for.cond.for.end_crit_edge: ; preds = %for.cond - br label %for.end + br label %for.end -; CHECK: for.body: -; CHECK-NEXT: %tmp3 = load i32, ptr @p for.body: ; preds = %for.cond - %tmp3 = load i32, ptr @p ; <i32> [#uses=1] - %dec = add i32 %tmp3, -1 ; <i32> [#uses=2] - store i32 %dec, ptr @p - %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1] - br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc + %tmp3 = load i32, ptr @p ; <i32> [#uses=1] + %dec = add i32 %tmp3, -1 ; <i32> [#uses=2] + store i32 %dec, ptr @p + %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1] + br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc -; CHECK: for.body.for.end_crit_edge: for.body.for.end_crit_edge: ; preds = %for.body - br label %for.end + br label %for.end for.inc: ; preds = %for.body - %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1] - br label %for.cond + %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1] + br label %for.cond for.end: ; preds = %for.body.for.end_crit_edge, %for.cond.for.end_crit_edge - %tmp9 = load i32, ptr @p ; <i32> [#uses=1] - ret i32 %tmp9 + %tmp9 = load i32, ptr @p ; <i32> [#uses=1] + ret i32 %tmp9 } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll index 3df63be..abbb17f 100644 --- a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll +++ b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll @@ -1,13 +1,45 @@ -; RUN: opt -passes=gvn -S < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP +; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA target datalayout = "e-p:64:64:64" ; GVN should preserve the TBAA tag on loads when doing PRE. 
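A minimal sketch of the property under test, assembled from the check lines of this file (the value names follow the test's own IR): when PRE hoists the load of %P into the preheader, the hoisted load should carry the !tbaa tag of the load it replaces:

bb.nph:
  %tmp33.pre = load i16, ptr %P, align 2, !tbaa !0   ; TBAA tag preserved on the PRE'd load
  br label %for.body

The MDEP run below checks exactly this hoisted form; the MemorySSA run currently performs no PRE here and instead checks that the load kept inside for.body retains the same tag.
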
-; CHECK-LABEL: @test( -; CHECK: %tmp33.pre = load i16, ptr %P, align 2, !tbaa !0 -; CHECK: br label %for.body define void @test(ptr %P, ptr %Q, i1 %arg) nounwind { +; MDEP-LABEL: define void @test( +; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] { +; MDEP-NEXT: [[ENTRY:.*:]] +; MDEP-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]] +; MDEP: [[BB_NPH]]: +; MDEP-NEXT: [[TMP33_PRE:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]] +; MDEP-NEXT: br label %[[FOR_BODY:.*]] +; MDEP: [[FOR_BODY]]: +; MDEP-NEXT: [[TMP33:%.*]] = phi i16 [ 0, %[[FOR_BODY]] ], [ [[TMP33_PRE]], %[[BB_NPH]] ] +; MDEP-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2 +; MDEP-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]] +; MDEP-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]] +; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]: +; MDEP-NEXT: br label %[[FOR_END]] +; MDEP: [[FOR_END]]: +; MDEP-NEXT: ret void +; +; MSSA-LABEL: define void @test( +; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] { +; MSSA-NEXT: [[ENTRY:.*:]] +; MSSA-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]] +; MSSA: [[BB_NPH]]: +; MSSA-NEXT: br label %[[FOR_BODY:.*]] +; MSSA: [[FOR_BODY]]: +; MSSA-NEXT: [[TMP33:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]] +; MSSA-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2 +; MSSA-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]] +; MSSA-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]] +; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]: +; MSSA-NEXT: br label %[[FOR_END]] +; MSSA: [[FOR_END]]: +; MSSA-NEXT: ret void +; entry: br i1 %arg, label %bb.nph, label %for.end @@ -29,3 +61,16 @@ for.end: ; preds = %for.body, %entry !1 = !{!"omnipotent char", !2} !2 = !{!"Simple C/C++ TBAA"} !3 = !{!"short", !1} +;. +; MDEP: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0} +; MDEP: [[META1]] = !{!"short", [[META2:![0-9]+]]} +; MDEP: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]} +; MDEP: [[META3]] = !{!"Simple C/C++ TBAA"} +;. +; MSSA: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0} +; MSSA: [[META1]] = !{!"short", [[META2:![0-9]+]]} +; MSSA: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]} +; MSSA: [[META3]] = !{!"Simple C/C++ TBAA"} +;. +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll deleted file mode 100644 index b537b7b..0000000 --- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll +++ /dev/null @@ -1,56 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 - -; RUN: opt -passes=instsimplify -S < %s | FileCheck %s - -; Test that intrinsics wasm dot call are constant folded - -target triple = "wasm32-unknown-unknown" - - -define <4 x i32> @dot_zero() { -; CHECK-LABEL: define <4 x i32> @dot_zero() { -; CHECK-NEXT: ret <4 x i32> zeroinitializer -; - %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer) - ret <4 x i32> %res -} - -; a = 1 2 3 4 5 6 7 8 -; b = 1 2 3 4 5 6 7 8 -; k1|k2 = a * b = 1 4 9 16 25 36 49 64 -; k1 + k2 = (1+4) | (9 + 16) | (25 + 36) | (49 + 64) -; result = 5 | 25 | 61 | 113 -define <4 x i32> @dot_nonzero() { -; CHECK-LABEL: define <4 x i32> @dot_nonzero() { -; CHECK-NEXT: ret <4 x i32> <i32 5, i32 25, i32 61, i32 113> -; - %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>) - ret <4 x i32> %res -} - -define <4 x i32> @dot_doubly_negative() { -; CHECK-LABEL: define <4 x i32> @dot_doubly_negative() { -; CHECK-NEXT: ret <4 x i32> splat (i32 2) -; - %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>) - ret <4 x i32> %res -} - -; Tests that i16 max signed values fit in i32 -define <4 x i32> @dot_follow_modulo_spec_1() { -; CHECK-LABEL: define <4 x i32> @dot_follow_modulo_spec_1() { -; CHECK-NEXT: ret <4 x i32> <i32 2147352578, i32 0, i32 0, i32 0> -; - %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 32767, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> <i16 32767, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>) - ret <4 x i32> %res -} - -; Tests that i16 min signed values fit in i32 -define <4 x i32> @dot_follow_modulo_spec_2() { -; CHECK-LABEL: define <4 x i32> @dot_follow_modulo_spec_2() { -; CHECK-NEXT: ret <4 x i32> <i32 -2147483648, i32 0, i32 0, i32 0> -; - %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 -32768, i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> <i16 -32768, i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>) - ret <4 x i32> %res -} - diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll index 795de3d..a8d9a0c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll @@ -45,8 +45,8 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1 ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] 
+; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 ; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] ; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 @@ -128,8 +128,8 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range ; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3 ; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]] ; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll index 0232d88..4b895ae 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll @@ -459,7 +459,7 @@ define void @latch_branch_cost(ptr %dst) { ; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP:.*]] ; PRED: [[LOOP]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] ; PRED-NEXT: store i8 0, ptr [[GEP]], align 1 ; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 @@ -738,8 +738,8 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 { ; PRED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP:.*]] ; PRED: [[LOOP]]: -; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; PRED-NEXT: [[L:%.*]] = load i16, ptr [[SRC]], align 2 ; PRED-NEXT: [[O:%.*]] = or i16 [[L]], 1 ; PRED-NEXT: [[CONV:%.*]] = uitofp i16 [[O]] to double @@ -865,7 +865,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) { ; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; DEFAULT-NEXT: br label %[[LOOP:.*]] ; DEFAULT: [[LOOP]]: -; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; DEFAULT-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8 ; DEFAULT-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] ; DEFAULT-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1 @@ -967,7 +967,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) { ; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi 
i64 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP:.*]] ; PRED: [[LOOP]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8 ; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] ; PRED-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1 @@ -1554,7 +1554,7 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) { ; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP_HEADER:.*]] ; PRED: [[LOOP_HEADER]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; PRED-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[THEN:.*]] ; PRED: [[THEN]]: ; PRED-NEXT: br label %[[LOOP_LATCH]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll index fff99f1..41a624b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll @@ -75,8 +75,8 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) { ; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll index 1471896..cc36cdb 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll @@ -397,7 +397,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] ; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 ; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] ; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 @@ -546,7 +546,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] ; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 ; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] ; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 @@ -621,7 +621,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; OPTSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]] ; OPTSIZE: [[FOR_BODY]]: -; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 ; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] ; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 @@ -696,7 +696,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 ; MINSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; MINSIZE-NEXT: br label %[[FOR_BODY:.*]] ; MINSIZE: [[FOR_BODY]]: -; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8 ; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]] ; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll index 8495dee..b4df63d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll @@ -1,47 +1,28 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5 -; REQUIRES: asserts -; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S \ -; RUN: -debug-only=loop-vectorize %s 2>&1 | FileCheck %s +; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S %s | FileCheck %s -; FIXME: Hoisted vector code should be costed with scalable cost. -; In this example, `<vscale x 4 x float> @llvm.minimumnum` has an invalid cost, -; and hence should not be produced by LoopVectorize. 
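To make the change in this hunk concrete, a before/after sketch assembled from the removed and added check lines (value names follow the check variables; no behavior beyond what the checks state is implied): the vectorizer now selects a fixed-width form with a valid cost instead of the scalable form whose cost is Invalid:

; previously expected (scalable; Invalid cost, so it must not be emitted):
  %7 = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> %broadcast.splat, <vscale x 4 x float> zeroinitializer)
; now expected (fixed-width; costable):
  %0 = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %broadcast.splat, <4 x float> zeroinitializer)
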
- -; CHECK: LV: Found an estimated cost of Invalid for VF vscale x 4 For instruction: %res = tail call float @llvm.minimumnum.f32(float %arg, float 0.000000e+00) define void @cost_hoisted_vector_code(ptr %p, float %arg) { ; CHECK-LABEL: define void @cost_hoisted_vector_code( ; CHECK-SAME: ptr [[P:%.*]], float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 ; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 -1, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 -1, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[ARG]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = add i64 1, [[N_VEC]] -; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[ARG]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> zeroinitializer) ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX:%.*]] = add i64 1, [[INDEX1]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]] -; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP10]] -; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP8]], align 4 -; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP11]], align 4 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP5]] -; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP8]], i32 4 +; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP8]], align 4 +; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], -8 +; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 -1, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll index d9a3a71..830e7da 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll @@ -59,8 +59,8 @@ define i32 @pr70988(ptr %src, i32 %n) { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]] ; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8 ; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll index 08d35f7..381d2e1 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll @@ -256,10 +256,10 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2) ; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP:.*]] ; PRED: [[LOOP]]: -; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT8]], %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ] -; PRED-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ] +; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ] ; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1 ; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]] ; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4 @@ -491,8 +491,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 { ; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ] ; PRED-NEXT: br label %[[LOOP:.*]] ; PRED: [[LOOP]]: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; PRED-NEXT: [[RED:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ] ; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]] ; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2 ; PRED-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll index a60d35d..0cad053 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll @@ -159,8 +159,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 
%n) #0 { ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP15]], [[SUM_07]] @@ -420,8 +420,8 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP45]], [[SUM_07]] @@ -673,9 +673,9 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali ; CHECK-ORDERED-TF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP22]], [[ADD_PHI2]] @@ -918,8 +918,8 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], 
[[SCALAR_PH]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] @@ -1148,8 +1148,8 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] +; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP18]], 0.000000e+00 @@ -1623,8 +1623,8 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] @@ -1945,8 +1945,8 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 { ; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-ORDERED-TF: for.body: -; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll index 51efbe9..d32b898 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll @@ -114,7 +114,7 @@ define void @cost_store_i8(ptr %dst) #0 { ; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; PRED-NEXT: br label [[LOOP:%.*]] ; PRED: loop: -; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]] ; PRED-NEXT: store i8 0, ptr [[GEP]], align 1 ; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll index 20bc0af..76a7536 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a510 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA510 ; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a520 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA520 +; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a320 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA320 define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA510-LABEL: define void @sve_add( @@ -131,6 +132,70 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) { ; CHECK-CA520: [[FOR_COND_CLEANUP]]: ; CHECK-CA520-NEXT: ret void ; +; CHECK-CA320-LABEL: define void @sve_add( +; CHECK-CA320-SAME: ptr [[DST:%.*]], ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-CA320-NEXT: [[ENTRY:.*:]] +; CHECK-CA320-NEXT: [[B3:%.*]] = ptrtoint ptr [[B]] to i64 +; CHECK-CA320-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; CHECK-CA320-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64 +; CHECK-CA320-NEXT: [[CMP9_NOT:%.*]] = icmp eq i64 [[N]], 0 +; CHECK-CA320-NEXT: br i1 [[CMP9_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK-CA320: [[FOR_BODY_PREHEADER]]: +; CHECK-CA320-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8 +; CHECK-CA320-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; CHECK-CA320: [[VECTOR_MEMCHECK]]: +; CHECK-CA320-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[A2]] +; CHECK-CA320-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32 +; CHECK-CA320-NEXT: [[TMP1:%.*]] = sub i64 [[DST1]], [[B3]] +; CHECK-CA320-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32 +; CHECK-CA320-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; CHECK-CA320-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; CHECK-CA320: [[VECTOR_PH]]: +; CHECK-CA320-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8 +; CHECK-CA320-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-CA320-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK-CA320: [[VECTOR_BODY]]: +; CHECK-CA320-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-CA320-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]] +; 
CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4 +; CHECK-CA320-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 +; CHECK-CA320-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP3]], align 4 +; CHECK-CA320-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]] +; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i32 4 +; CHECK-CA320-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP4]], align 4 +; CHECK-CA320-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP5]], align 4 +; CHECK-CA320-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]] +; CHECK-CA320-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]] +; CHECK-CA320-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]] +; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4 +; CHECK-CA320-NEXT: store <4 x float> [[TMP6]], ptr [[TMP8]], align 4 +; CHECK-CA320-NEXT: store <4 x float> [[TMP7]], ptr [[TMP9]], align 4 +; CHECK-CA320-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-CA320-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-CA320-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-CA320: [[MIDDLE_BLOCK]]: +; CHECK-CA320-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-CA320-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; CHECK-CA320: [[SCALAR_PH]]: +; CHECK-CA320-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; CHECK-CA320-NEXT: br label %[[FOR_BODY:.*]] +; CHECK-CA320: [[FOR_BODY]]: +; CHECK-CA320-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-CA320-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]] +; CHECK-CA320-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-CA320-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]] +; CHECK-CA320-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-CA320-NEXT: [[ADD:%.*]] = fadd fast float [[TMP12]], [[TMP11]] +; CHECK-CA320-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDVARS_IV]] +; CHECK-CA320-NEXT: store float [[ADD]], ptr [[ARRAYIDX4]], align 4 +; CHECK-CA320-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-CA320-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] +; CHECK-CA320-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-CA320: [[FOR_COND_CLEANUP_LOOPEXIT]]: +; CHECK-CA320-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK-CA320: [[FOR_COND_CLEANUP]]: +; CHECK-CA320-NEXT: ret void +; entry: %cmp9.not = icmp eq i64 %n, 0 br i1 %cmp9.not, label %for.cond.cleanup, label %for.body @@ -160,3 +225,8 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo ; CHECK-CA520: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK-CA520: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} ;. 
+; CHECK-CA320: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; CHECK-CA320: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; CHECK-CA320: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; CHECK-CA320: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +;. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll index ce7b78e..2b01018 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll @@ -1,81 +1,100 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5 ; RUN: opt -passes=loop-vectorize -S < %s | FileCheck %s target triple = "aarch64-unknown-linux-gnu" define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 { -; CHECK-LABEL: @trip7_i64( -; CHECK: = call i64 @llvm.vscale.i64() -; CHECK-NEXT: = mul nuw i64 -; CHECK: [[VSCALE:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[VF:%.*]] = mul nuw i64 [[VSCALE]], 2 -; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] -; CHECK: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ {{%.*}}, %vector.ph ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %vector.body ] -; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) -; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) -; CHECK: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> {{%.*}}, ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[VF]] +; CHECK-LABEL: define void @trip7_i64( +; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 7, [[TMP2]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 +; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 7) +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], splat (i64 1) +; 
CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison) +; CHECK-NEXT: [[TMP8:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD1]], [[TMP6]] +; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]] ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 7) -; CHECK-NEXT: [[ACTIVE_LANE_MASK_NOT:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) -; CHECK-NEXT: [[COND:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NOT]], i32 0 -; CHECK-NEXT: br i1 [[COND]], label %middle.block, label %vector.body +; CHECK-NEXT: [[TMP9:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true) +; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x i1> [[TMP9]], i32 0 +; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH]]: ; entry: - br label %for.body + br label %loop -for.body: ; preds = %entry, %for.body - %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i64, ptr %src, i64 %i.06 - %0 = load i64, ptr %arrayidx, align 8 +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.src = getelementptr inbounds i64, ptr %src, i64 %iv + %0 = load i64, ptr %gep.src, align 8 %mul = shl nsw i64 %0, 1 - %arrayidx1 = getelementptr inbounds i64, ptr %dst, i64 %i.06 - %1 = load i64, ptr %arrayidx1, align 8 + %gep.dst = getelementptr inbounds i64, ptr %dst, i64 %iv + %1 = load i64, ptr %gep.dst, align 8 %add = add nsw i64 %1, %mul - store i64 %add, ptr %arrayidx1, align 8 - %inc = add nuw nsw i64 %i.06, 1 - %exitcond.not = icmp eq i64 %inc, 7 - br i1 %exitcond.not, label %for.end, label %for.body + store i64 %add, ptr %gep.dst, align 8 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 7 + br i1 %ec, label %exit, label %loop -for.end: ; preds = %for.body +exit: ret void } define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 { -; CHECK-LABEL: @trip5_i8( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]] -; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-LABEL: define void @trip5_i8( +; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[GEP_SRC]], align 1 ; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP0]], 1 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]] -; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1 +; CHECK-NEXT: [[GEP_DST:%.*]] = 
getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[GEP_DST]], align 1 ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]] -; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1 -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] -; CHECK: for.end: +; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 5 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; entry: - br label %for.body + br label %loop -for.body: ; preds = %entry, %for.body - %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ] - %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08 - %0 = load i8, ptr %arrayidx, align 1 +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %gep.src = getelementptr inbounds i8, ptr %src, i64 %iv + %0 = load i8, ptr %gep.src, align 1 %mul = shl i8 %0, 1 - %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08 - %1 = load i8, ptr %arrayidx1, align 1 + %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv + %1 = load i8, ptr %gep.dst, align 1 %add = add i8 %mul, %1 - store i8 %add, ptr %arrayidx1, align 1 - %inc = add nuw nsw i64 %i.08, 1 - %exitcond.not = icmp eq i64 %inc, 5 - br i1 %exitcond.not, label %for.end, label %for.body + store i8 %add, ptr %gep.dst, align 1 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 5 + br i1 %ec, label %exit, label %loop -for.end: ; preds = %for.body +exit: ret void } diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll index f4982e6..d6f8b8e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll @@ -48,8 +48,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 ; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] @@ -101,8 +101,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; 
CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4 ; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]] @@ -171,8 +171,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]] ; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 ; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] @@ -223,8 +223,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 { ; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK-IN-LOOP: while.body: -; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] ; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]] ; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4 ; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]] @@ -298,8 +298,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]] ; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5 @@ -362,8 +362,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 { ; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ] ; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-IN-LOOP: for.body: -; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] +; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] +; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ] ; CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr 
[[COND]], i64 [[IV]] ; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll index 9929f35..5c6328e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll @@ -35,7 +35,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]] ; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1) ; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP15]] @@ -48,6 +47,7 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias ; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1) ; CHECK-NEXT: store <vscale x 2 x i8> [[TMP20]], ptr [[TMP18]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -119,7 +119,6 @@ define void @pointer_induction(ptr noalias %start, i64 %N) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]] ; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP12]], splat (i64 1) ; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]] @@ -128,6 +127,7 @@ define void @pointer_induction(ptr noalias %start, i64 %N) { ; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1) ; CHECK-NEXT: store <vscale x 2 x i8> [[TMP17]], ptr [[TMP15]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll index 6947884..2c88e0e 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll @@ -239,7 +239,6 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 { ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], 
[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3 ; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 2 x i64> [[TMP9]], splat (i64 2) ; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP10]] @@ -250,6 +249,7 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 { ; CHECK-NEXT: [[TMP12]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]] ; CHECK-NEXT: store <vscale x 2 x ptr> [[VECTOR_GEP]], ptr [[NEXT_GEP]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3 ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] @@ -313,7 +313,6 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr % ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[PTR:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2 ; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() ; CHECK-NEXT: [[TMP5:%.*]] = shl <vscale x 2 x i64> [[TMP4]], splat (i64 1) ; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP5]] @@ -321,6 +320,7 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr % ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i64 0 ; CHECK-NEXT: call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> zeroinitializer, ptr [[TMP7]], i32 2, <vscale x 2 x i1> [[TMP6]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2 ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll index a11896a..124abc6 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll @@ -80,7 +80,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; DATA-NEXT: br label [[WHILE_BODY:%.*]] ; DATA: while.body: -; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 ; DATA-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 @@ -127,7 +127,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_NO_LANEMASK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, 
[[ENTRY:%.*]] ] ; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]] ; DATA_NO_LANEMASK: while.body: -; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 ; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 @@ -169,7 +169,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]] ; DATA_AND_CONTROL: while.body: -; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 ; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 @@ -216,7 +216,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]] ; DATA_AND_CONTROL_NO_RT_CHECK: while.body: -; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]] ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4 ; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll index d0ea828..bd6a027 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll @@ -116,7 +116,7 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) { ; VF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; VF4-NEXT: br label %[[LOOP:.*]] ; VF4: [[LOOP]]: -; VF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1 ; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]] ; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll index 66bb80b..59e65f7 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll @@ -30,7 +30,7 @@ define void 
@test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1 ; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] @@ -218,7 +218,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3 ; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] @@ -280,7 +280,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4 ; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll index 83cb325..fd94673 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll @@ -40,8 +40,8 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 @@ -120,8 +120,8 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: 
[[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]] ; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32 @@ -195,8 +195,8 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]] @@ -260,8 +260,8 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 1, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]] @@ -325,8 +325,8 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ -1, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = and i32 [[TMP7]], [[R_07]] @@ -390,8 +390,8 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]] @@ -455,8 +455,8 @@ define i32 
@xor_i32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]] @@ -520,8 +520,8 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]] @@ -585,8 +585,8 @@ define float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[FOR_BODY_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll index 0f4d40f..8fbeff5 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll @@ -393,7 +393,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) ; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; DEFAULT-NEXT: br label %[[FOR_BODY:.*]] ; DEFAULT: [[FOR_BODY]]: -; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8 ; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]] ; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 5f13089..2b93668 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -45,7 +45,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY1:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] ; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 @@ -74,16 +74,50 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) { ; CHECK-LABEL: define void @test_wide_ptr_induction( ; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: entry: +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1 +; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[B]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 2 x i64> [[TMP5]], splat (i64 8) +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP6]] +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[VECTOR_GEP]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP7]] to i64 +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP10]] +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[B]], [[ENTRY]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: 
[[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[VECTOR_BODY]] ], [ [[B]], [[VECTOR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw nsw i64 [[EVL_BASED_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]] +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; @@ -109,4 +143,6 @@ for.cond.cleanup: ; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"} ; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index 6e2434a..1addff6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -151,8 +151,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] ; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 ; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 @@ -204,8 +204,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds i16, ptr [[X]], i32 [[I_08]] ; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 ; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32 @@ -372,8 +372,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] @@ -419,8 +419,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll index d6f16bf..056dc7e 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll @@ -87,6 +87,16 @@ entry: ; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> ; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_3' ; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with 
factor 3 at %l0, ir<%p0> ; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> @@ -98,6 +108,16 @@ entry: ; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> ; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> ; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> +; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0> +; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.3, ptr %data, i64 %i, i32 0 @@ -135,6 +155,16 @@ entry: ; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_4' ; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> @@ -146,6 +176,16 @@ entry: ; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> ; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> ; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; NO-OPT: Cost of 32 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; NO-OPT: Cost of 32 for 
VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> +; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0> +; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.4, ptr %data, i64 %i, i32 0 @@ -185,6 +225,14 @@ entry: ; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_5' ; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> @@ -194,6 +242,14 @@ entry: ; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> ; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> ; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> +; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0> +; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.5, ptr %data, i64 %i, i32 0 @@ -237,6 +293,14 @@ entry: ; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; OPT-NF6: Cost of 10 for 
VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_6' ; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> @@ -246,6 +310,14 @@ entry: ; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> ; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> ; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> +; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0> +; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.6, ptr %data, i64 %i, i32 0 @@ -293,6 +365,14 @@ entry: ; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> ; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_7' ; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> ; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> @@ -302,6 +382,14 @@ entry: ; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> ; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> ; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; NO-OPT: Cost of 56 for VF 
vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; NO-OPT: Cost of 56 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> +; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0> +; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.7, ptr %data, i64 %i, i32 0 @@ -353,6 +441,14 @@ entry: ; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; NO-OPT-LABEL: Checking a loop in 'i8_factor_8' ; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> @@ -362,6 +458,14 @@ entry: ; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> ; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> ; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> +; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0> +; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0> for.body: %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] %p0 = getelementptr inbounds %i8.8, ptr %data, i64 %i, i32 0 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll index 0a87257..32cb426 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll @@ -146,7 +146,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]] ; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1 @@ -379,8 +379,8 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1 ; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll index 0afe04e..07a7b7b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll @@ -1,29 +1,36 @@ -; RUN: opt -passes=loop-vectorize \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s +; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s ; REQUIRES: asserts -; Make sure we do not vectorize a loop with a widened pointer induction. -define void @test_wide_pointer_induction(ptr noalias %a, i64 %N) { +; For %for.1, we are fine initially, because the previous value %for.1.next dominates the +; user of %for.1. But for %for.2, we have to sink the user (%for.1.next) past the previous +; value %for.2.next. This however breaks the condition we have for %for.1. We cannot fix +; both first order recurrences and cannot vectorize the loop. +; +; Make sure we don't compute costs if there are no vector VPlans. 
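
(Illustrative sketch, not part of this patch: the two recurrences from the test body added below, reduced to the ordering constraint the comment above describes. Names, types, and constants mirror the new test.)

; %for.1.next is simultaneously the previous value of %for.1 and the
; user of %for.2, so it must come *before* %for.2.next (to satisfy
; %for.1) and *after* %for.2.next (to satisfy %for.2) -- an
; unsatisfiable order.
define i32 @two_first_order_recurrences(i32 %N) {
entry:
  br label %loop

loop:
  %iv = phi i32 [ 10, %entry ], [ %iv.next, %loop ]
  %for.1 = phi i32 [ 20, %entry ], [ %for.1.next, %loop ]
  %for.2 = phi i32 [ 11, %entry ], [ %for.2.next, %loop ]
  %for.1.next = add nsw i32 %for.2, 1  ; user of %for.2
  %for.2.next = shl i32 %for.1, 24     ; user of %for.1
  %iv.next = add nsw i32 %iv, 1
  %ec = icmp eq i32 %iv.next, %N
  br i1 %ec, label %exit, label %loop

exit:
  %res = add i32 %for.1.next, %for.2.next
  ret i32 %res
}

Because no legal sinking order exists, loop-vectorize builds no vector VPlans for this loop, and the CHECK-NOT below confirms that no "Vector loop of width ... costs:" debug line is ever printed.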
+ ; CHECK-NOT: LV: Vector loop of width {{.+}} costs: ; -; CHECK: define void @test_wide_pointer_induction( +; CHECK: define i32 @test( ; CHECK-NOT: vector.body ; +define i32 @test(i32 %N) { entry: - br label %loop + br label %for.body -loop: - %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] - %iv.ptr = phi ptr [ %a, %entry ], [ %iv.ptr.next, %loop ] - %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv - store ptr %iv.ptr, ptr %arrayidx, align 8 - %iv.next = add nuw nsw i64 %iv, 1 - %iv.ptr.next = getelementptr i64, ptr %iv.ptr, i32 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %exit, label %loop +for.body: ; preds = %for.body.preheader, %for.body + %iv = phi i32 [ %inc, %for.body ], [ 10, %entry ] + %for.1 = phi i32 [ %for.1.next, %for.body ], [ 20, %entry ] + %for.2 = phi i32 [ %for.2.next, %for.body ], [ 11, %entry ] + %for.1.next = add nsw i32 %for.2, 1 + %for.2.next = shl i32 %for.1, 24 + %inc = add nsw i32 %iv, 1 + %exitcond = icmp eq i32 %inc, %N + br i1 %exitcond, label %for.cond1.for.end_crit_edge, label %for.body -exit: - ret void +for.cond1.for.end_crit_edge: ; preds = %for.body + %add.lcssa = phi i32 [ %for.1.next, %for.body ] + %sext.lcssa = phi i32 [ %for.2.next, %for.body ] + %res = add i32 %add.lcssa, %sext.lcssa + ret i32 %res } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll index 01df436..d41d47a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll @@ -58,7 +58,7 @@ define void @test(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_COND1:%.*]] ; CHECK: for.cond: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ] ; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index ed50796..c037b70 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -41,7 +41,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] @@ -106,7 +106,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] ; CHECK-NEXT: [[AIDX:%.*]] = 
load i64, ptr [[BADDR]], align 8 ; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] @@ -172,8 +172,8 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] ; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8 ; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]] @@ -238,7 +238,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -296,7 +296,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 @@ -417,7 +417,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll index 9e492c6..df907dc 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll @@ -170,7 +170,6 @@ define void @single_constant_stride_ptr_iv(ptr %p) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]] ; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64() ; 
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
@@ -181,6 +180,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -753,13 +753,11 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP18:%.*]] = mul <vscale x 4 x i64> [[TMP19]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
@@ -767,7 +765,9 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
+; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]]
+; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index ce2b790..2be74e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -1330,7 +1330,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index d02d53b..76a830a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -57,8 +57,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3
@@ -108,8 +108,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
@@ -285,8 +285,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
;
IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3 @@ -339,8 +339,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] +; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ] +; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ] ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3 @@ -537,8 +537,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 @@ -597,8 +597,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] +; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ] ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 @@ -804,8 +804,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-OUTLOOP: for.body: -; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] +; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] +; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] ; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds 
i32, ptr [[A]], i64 [[IV1]] ; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 ; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 @@ -867,8 +867,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) { ; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL-INLOOP: for.body: -; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] -; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] +; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ] +; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ] ; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 ; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll index ae047f5..a216aa8 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll @@ -45,7 +45,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; IF-EVL-NEXT: br label %[[LOOP:.*]] ; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 ; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -166,7 +166,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; IF-EVL-NEXT: br label %[[LOOP:.*]] ; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 ; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -286,7 +286,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; IF-EVL-NEXT: br label %[[LOOP:.*]] ; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 ; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -406,7 +406,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; IF-EVL-NEXT: br label %[[LOOP:.*]] ; IF-EVL: [[LOOP]]: -; IF-EVL-NEXT: 
[[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8 ; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index 987f946..f92bf5a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -53,8 +53,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 33, %[[ENTRY]] ] ; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] ; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] ; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]] @@ -192,9 +192,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[SCALAR_RECUR_INIT3:%.*]] = phi i32 [ 22, %[[ENTRY]] ] ; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] ; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT3]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] ; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]] @@ -353,10 +353,10 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-NEXT: [[SCALAR_RECUR_INIT6:%.*]] = phi i32 [ 11, %[[ENTRY]] ] ; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] ; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT5]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT6]], %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] +; 
IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] ; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]] @@ -666,8 +666,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 33, %[[ENTRY]] ] ; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] ; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42 ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]] ; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll index 2aeb1d0..da5aed9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll @@ -51,7 +51,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ] ; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 ; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index 3e23df7..433d1e4 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD]] = add nsw i32 
[[TMP18]], [[RDX]] @@ -259,8 +259,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] @@ -367,8 +367,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] @@ -475,8 +475,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] @@ -583,8 +583,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]] @@ -694,8 +694,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], 
[[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]] @@ -805,8 +805,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]] @@ -916,8 +916,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]] @@ -1027,8 +1027,8 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] @@ -1243,8 +1243,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi 
float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]] @@ -1356,8 +1356,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP17]], [[RDX]] @@ -1687,8 +1687,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] @@ -1807,8 +1807,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3 @@ -1924,8 +1924,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; 
IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index 8d987a9..c5d2739 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -50,7 +50,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0 ; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll index d474a03..62a4f73 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll @@ -39,7 +39,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY1:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]] ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll index 06c6bfe..296405d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll @@ -44,7 +44,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] ; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 ; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 @@ -113,7 +113,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] ; CHECK-NEXT: [[X:%.*]] = load 
i64, ptr [[GEP]], align 8 ; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 @@ -182,7 +182,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]] ; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8 ; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll index 5f407fc..e06bbe9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll @@ -43,7 +43,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]] ; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll index 59d1370..775d9ca 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll @@ -43,8 +43,8 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index 2d5718b..464667d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ 
[[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]] @@ -262,8 +262,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]] @@ -373,8 +373,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]] @@ -484,8 +484,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]] @@ -597,8 +597,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: 
[[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]] @@ -715,8 +715,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]] @@ -833,8 +833,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]] @@ -951,8 +951,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]] @@ -1067,8 +1067,8 @@ define float @fadd(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]] @@ -1287,8 +1287,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], 
[[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]] @@ -1405,8 +1405,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]] @@ -1739,8 +1739,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] @@ -1859,8 +1859,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3 @@ -1976,8 +1976,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) { ; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ 
[[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index e2db28d..397cb95 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -57,8 +57,8 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt ; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 ; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] ; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 @@ -205,8 +205,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal ; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] -; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ] +; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] +; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ] ; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 ; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] ; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 @@ -388,7 +388,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[LOOP:%.*]] ; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] ; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 ; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll index 1c78b25..2ec23b91 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll @@ -44,7 +44,7 @@ define void @test(ptr %p) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[LOOP:%.*]] ; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] ; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8 ; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 @@ -375,7 +375,7 @@ define void @trivial_due_max_vscale(ptr %p) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[LOOP:%.*]] ; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] ; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 ; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 @@ -483,7 +483,7 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[LOOP:%.*]] ; IF-EVL: loop: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] ; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 ; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll index 687a2e7..ab05166 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll @@ -50,7 +50,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]] ; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]] ; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll index 24649729..034b767 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll @@ -179,7 +179,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: [[TMP4:%.*]] = or i8 23, [[X]] ; 
CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1 ; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index dfdc893..01edeed 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -36,7 +36,7 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]] ; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1 ; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 568aa95..d97e93d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -117,7 +117,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6 ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 @@ -439,7 +439,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] ; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 ; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]] ; TF-SCALABLE: [[DO_LOAD]]: @@ -589,7 +589,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1 ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 @@ -726,7 +726,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = 
phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 @@ -890,7 +890,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 @@ -1068,7 +1068,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] ; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10 ; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]] ; TF-SCALABLE: [[DO_STORE]]: @@ -1216,7 +1216,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] ; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1 ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll index 7c1ec9a..d93a5c0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll @@ -27,7 +27,7 @@ define void @foo(ptr %arg) #0 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]] ; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index 85116fe..d3c3c6b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll index 082e326..0fb4655 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll @@ -42,7 +42,7 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3 diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll index 02a876a..d7cc6f0 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll @@ -96,7 +96,7 @@ define void @test(ptr %p, i40 %a) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24 ; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28 ; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32 diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll index e0fc73f..4e46a29 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll @@ -69,8 +69,8 @@ define void @func_21() { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] 
= phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]] ; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4 ; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll index c61b1b9..37493d1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll @@ -117,7 +117,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] ; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] ; CHECK: then.1: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 @@ -220,7 +220,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] ; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]] ; CHECK: then.1: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll index 85b475c..1a3ff6c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll @@ -1055,8 +1055,8 @@ define i64 @live_in_known_1_via_scev() { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 3, [[PH]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll index 1249df4..ee85e0e 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll @@ -46,8 +46,8 @@ define i1 @fn(ptr %nno) #0 { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY20:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ] -; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ] +; 
CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ] +; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ] ; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll index fe2ad66..07b130b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll @@ -507,8 +507,8 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3 ; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 4, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]] ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll index fcd94f4..a66800c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll @@ -623,7 +623,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32 ; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll index 07e2df3..c5ac0ae 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll @@ -35,7 +35,7 @@ define i32 @foo_optsize() #0 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 @@ -72,7 +72,7 @@ define i32 @foo_optsize() #0 { ; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; AUTOVF-NEXT: br label [[FOR_BODY:%.*]] ; AUTOVF: for.body: -; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], 
[[FOR_BODY]] ] ; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] ; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 @@ -131,7 +131,7 @@ define i32 @foo_minsize() #1 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] ; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 @@ -168,7 +168,7 @@ define i32 @foo_minsize() #1 { ; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; AUTOVF-NEXT: br label [[FOR_BODY:%.*]] ; AUTOVF: for.body: -; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]] ; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 ; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0 @@ -379,7 +379,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 ; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8 ; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] @@ -423,7 +423,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 { ; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ] ; AUTOVF-NEXT: br label [[LOOP:%.*]] ; AUTOVF: loop: -; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] +; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ] ; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72 ; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8 ; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll index 08adfdd..11c5e39 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll @@ -44,7 +44,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 99, [[BB5:%.*]] ] ; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] ; CHECK: loop.header: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ] ; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1 ; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0 ; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof 
[[PROF5:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll index 440f6e1..4145967 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll @@ -53,7 +53,7 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr ; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]] ; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]] ; CHECK: [[LOOP_3]]: -; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]] ; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1 ; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll index 5e35c4a..9a81fae 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll @@ -35,7 +35,7 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] @@ -99,7 +99,7 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]] @@ -181,8 +181,8 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B, ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll 
b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index f7eba42..a926ff4 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -146,7 +146,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] ; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll index 59f2925..e7fa655 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll @@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll index e9d85c2..f4fe120 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll @@ -79,7 +79,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6 ; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]] diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll index 42d45bd..8ac33a1 100644 --- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll @@ -102,9 +102,9 @@ define void @pr47390(ptr %a) { ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], 
%[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ] -; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ] +; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1 ; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]] diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll index c8cf2ad..9852f53 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll @@ -540,3 +540,227 @@ loop.latch: exit: ret void } + +; The start access is SCEV with non-constant offset because of variable `iv.start` +; for IV. +define void @deref_assumption_loop_access_start_variable(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 range(i64 0, 2000) %iv.start) nofree nosync { +; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable( +; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 range(i64 0, 2000) [[IV_START:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]] +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) +; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4 +; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16 +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ] +; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[N]], [[IV_START]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 1 +; CHECK-NEXT: [[TMP8:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP8]], splat (i1 true) +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP19]], i32 0 +; CHECK-NEXT: br label 
%[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP8]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1 +; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 +; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] +; CHECK: [[LOOP_THEN]]: +; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; + +entry: + %a = getelementptr i8, ptr %P, i64 16 + %cmp = icmp slt i64 %iv.start, %N + call void @llvm.assume(i1 %cmp) + %mul = mul i64 %N, 4 + %add = add i64 %mul, 16 + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ] + br label %loop + +loop: ; preds = %mainloop, %loop.latch + %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ] + %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 1 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: ; preds = %loop + %l.a = load i32, ptr %gep.a, align 1 + br label %loop.latch + +loop.latch: ; preds = 
%loop.then, %loop + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 1 + %iv.next = add nuw nsw i64 %iv, 1 + %term.cond = icmp slt i64 %iv.next, %N + br i1 %term.cond, label %loop, label %exit + +exit: + ret void +} + +; Same as previous test, but `iv.start` is not known nonnegative. +define void @deref_assumption_loop_access_start_variable_unknown_range(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 %iv.start) nofree nosync { +; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable_unknown_range( +; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[IV_START:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]] +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) +; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4 +; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16 +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ] +; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_START]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 1 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP3]], splat (i1 true) +; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 +; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 1 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 +; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ] +; 
CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP16]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1 +; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 +; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] +; CHECK: [[LOOP_THEN]]: +; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + %a = getelementptr i8, ptr %P, i64 16 + %cmp = icmp slt i64 %iv.start, %N + call void @llvm.assume(i1 %cmp) + %mul = mul i64 %N, 4 + %add = add i64 %mul, 16 + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ] + br label %loop + +loop: ; preds = %mainloop, %loop.latch + %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ] + %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 1 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: ; preds = %loop + %l.a = load i32, ptr %gep.a, align 1 + br label %loop.latch + +loop.latch: ; preds = %loop.then, %loop + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 1 + %iv.next = add nuw nsw i64 %iv, 1 + %term.cond = icmp slt i64 %iv.next, %N + br i1 %term.cond, label %loop, label %exit + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll index 1936b40..d666487 100644 --- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll +++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll @@ -203,7 +203,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: 
loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] ; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll index 3adfcf5..db97bdf 100644 --- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll @@ -2750,9 +2750,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ] ; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] ; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] ; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 @@ -2813,9 +2813,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] ; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] ; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 @@ -2899,9 +2899,9 @@ define i32 @sink_into_replication_region(i32 %y) { ; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ] ; SINK-AFTER-NEXT: ret i32 [[VAR]] ; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; 
SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] ; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1 @@ -3113,10 +3113,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ] ; UNROLL-NO-IC-NEXT: ret i32 [[VAR]] ; UNROLL-NO-IC: bb2: -; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] ; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] @@ -3194,10 +3194,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; UNROLL-NO-VF-NEXT: ret i32 [[VAR]] ; UNROLL-NO-VF: bb2: -; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] ; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] @@ -3316,10 +3316,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) { ; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ] ; SINK-AFTER-NEXT: ret i32 [[VAR]] ; SINK-AFTER: bb2: -; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ] -; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, 
[[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] +; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ] ; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]] ; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]] ; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]] diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll index 9c910d7..10d83a4 100644 --- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll +++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll @@ -324,6 +324,56 @@ for.end: ; preds = %for.body, %entry declare double @llvm.exp2.f64(double) +define void @ldexp_f32i32(i32 %n, ptr %y, ptr %x, i32 %exp) { +; CHECK-LABEL: @ldexp_f32i32( +; CHECK: llvm.ldexp.v4f32.v4i32 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv + %0 = load float, ptr %arrayidx, align 4 + %call = tail call float @llvm.ldexp.f32.i32(float %0, i32 %exp) + %arrayidx2 = getelementptr inbounds float, ptr %x, i32 %iv + store float %call, ptr %arrayidx2, align 4 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare float @llvm.ldexp.f32.i32(float, i32) + +define void @ldexp_f64i32(i32 %n, ptr %y, ptr %x, i32 %exp) { +; CHECK-LABEL: @ldexp_f64i32( +; CHECK: llvm.ldexp.v4f64.v4i32 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv + %0 = load double, ptr %arrayidx, align 8 + %call = tail call double @llvm.ldexp.f64.i32(double %0, i32 %exp) + %arrayidx2 = getelementptr inbounds double, ptr %x, i32 %iv + store double %call, ptr %arrayidx2, align 8 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare double @llvm.ldexp.f64.i32(double, i32) + define void @log_f32(i32 %n, ptr %y, ptr %x) { ; CHECK-LABEL: @log_f32( ; CHECK: llvm.log.v4f32 @@ -976,6 +1026,157 @@ for.end: ; preds = %for.body, %entry declare double @llvm.roundeven.f64(double) + +define void @lround_i32f32(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @lround_i32f32( +; CHECK: llvm.lround.v4i32.v4f32 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv + %0 = load float, ptr %arrayidx, align 4 + %call = tail call i32 @llvm.lround.i32.f32(float %0) + %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv + store i32 %call, ptr %arrayidx2, align 4 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i32 @llvm.lround.i32.f32(float) + +define void @lround_i32f64(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @lround_i32f64( +; CHECK: llvm.lround.v4i32.v4f64 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds double, ptr %y, i32 
%iv + %0 = load double, ptr %arrayidx, align 8 + %call = tail call i32 @llvm.lround.i32.f64(double %0) + %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv + store i32 %call, ptr %arrayidx2, align 8 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i32 @llvm.lround.i32.f64(double) + +define void @lround_i64f32(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @lround_i64f32( +; CHECK: llvm.lround.v4i64.v4f32 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv + %0 = load float, ptr %arrayidx, align 4 + %call = tail call i64 @llvm.lround.i64.f32(float %0) + %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv + store i64 %call, ptr %arrayidx2, align 4 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i64 @llvm.lround.i64.f32(float) + +define void @lround_i64f64(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @lround_i64f64( +; CHECK: llvm.lround.v4i64.v4f64 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv + %0 = load double, ptr %arrayidx, align 8 + %call = tail call i64 @llvm.lround.i64.f64(double %0) + %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv + store i64 %call, ptr %arrayidx2, align 8 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i64 @llvm.lround.i64.f64(double) + +define void @llround_i64f32(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @llround_i64f32( +; CHECK: llvm.llround.v4i64.v4f32 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv + %0 = load float, ptr %arrayidx, align 4 + %call = tail call i64 @llvm.llround.i64.f32(float %0) + %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv + store i64 %call, ptr %arrayidx2, align 4 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i64 @llvm.llround.i64.f32(float) + +define void @llround_i64f64(i32 %n, ptr %y, ptr %x) { +; CHECK-LABEL: @llround_i64f64( +; CHECK: llvm.llround.v4i64.v4f64 +; CHECK: ret void +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.body + %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv + %0 = load double, ptr %arrayidx, align 8 + %call = tail call i64 @llvm.llround.i64.f64(double %0) + %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv + store i64 %call, ptr %arrayidx2, align 8 + %iv.next = add i32 %iv, 1 + %exitcond = icmp eq i32 %iv.next, %n + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body, %entry + ret void +} + +declare i64 @llvm.llround.i64.f64(double) + define void @fma_f32(i32 %n, ptr %y, ptr %x, ptr %z, ptr %w) { ; CHECK-LABEL: @fma_f32( ; CHECK: llvm.fma.v4f32 diff --git 
a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll index a0068f0..d6acba5 100644 --- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll +++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll @@ -473,8 +473,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) { ; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ] ; IC4VF4-NEXT: br label %[[LOOP:.*]] ; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] +; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] ; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] ; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1 ; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]] @@ -844,8 +844,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) { ; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ] ; IC4VF4-NEXT: br label %[[LOOP:.*]] ; IC4VF4: [[LOOP]]: -; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] +; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ] ; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]] ; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1 ; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]] diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll index 10b2e70..22ebf92 100644 --- a/llvm/test/Transforms/LoopVectorize/loop-form.ll +++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll @@ -84,7 +84,7 @@ define void @bottom_tested(ptr %p, i32 %n) { ; TAILFOLD-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; TAILFOLD-NEXT: br label [[FOR_COND:%.*]] ; TAILFOLD: for.cond: -; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ] +; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ] ; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64 ; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]] ; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll index c9066f2..72bc181 100644 --- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll +++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll @@ -74,7 +74,7 @@ define void @maxvf3() { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[J:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[AJ:%.*]] = 
getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]] ; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8 ; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]] diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll index f0d026b..b9ee09e 100644 --- a/llvm/test/Transforms/LoopVectorize/optsize.ll +++ b/llvm/test/Transforms/LoopVectorize/optsize.ll @@ -626,6 +626,7 @@ define i32 @pr45526_pgso() !prof !14 { ; NPGSO-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; NPGSO: [[MIDDLE_BLOCK]]: ; NPGSO-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3 +; NPGSO-NEXT: br label %[[SCALAR_PH]] ; NPGSO: [[SCALAR_PH]]: ; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 508, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; NPGSO-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 5, %[[ENTRY]] ] @@ -698,7 +699,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] ; CHECK-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] ; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] ; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4 @@ -747,7 +748,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; PGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; PGSO-NEXT: br label %[[FOR_BODY:.*]] ; PGSO: [[FOR_BODY]]: -; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] ; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] ; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] ; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 @@ -796,7 +797,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize { ; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; NPGSO-NEXT: br label %[[FOR_BODY:.*]] ; NPGSO: [[FOR_BODY]]: -; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ] ; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]] ; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]] ; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4 diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll index 69931a0..d2c53f4 100644 --- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll @@ -231,7 +231,6 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) { ; STRIDED: vector.body: ; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4 ; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0 ; STRIDED-NEXT: [[DOTSPLAT:%.*]] = 
shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; STRIDED-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]] @@ -240,6 +239,7 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) { ; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[OFFSET_IDX]] ; STRIDED-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 4 ; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4 ; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]] ; STRIDED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100 ; STRIDED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll index c044cc0..bda91ba 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll @@ -62,7 +62,7 @@ define void @pr45679(ptr %A) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] ; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 ; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 @@ -124,7 +124,7 @@ define void @pr45679(ptr %A) { ; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; VF2UF2-NEXT: br label [[LOOP:%.*]] ; VF2UF2: loop: -; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] +; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] ; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] ; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 ; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 @@ -181,7 +181,7 @@ define void @pr45679(ptr %A) { ; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ] ; VF1UF4-NEXT: br label [[LOOP:%.*]] ; VF1UF4: loop: -; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] +; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ] ; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]] ; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1 ; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1 @@ -261,7 +261,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8 @@ -328,7 +328,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; 
VF2UF2-NEXT: br label [[FOR_BODY:%.*]] ; VF2UF2: for.body: -; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8 @@ -390,7 +390,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) { ; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; VF1UF4-NEXT: br label [[FOR_BODY:%.*]] ; VF1UF4: for.body: -; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll index d4a6aed..7d6667c 100644 --- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll +++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll @@ -36,7 +36,7 @@ define void @test(i16 %x, i64 %y, ptr %ptr) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4 ; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8 ; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll index 77794dc..19c9ccc 100644 --- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll @@ -67,8 +67,8 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1 ; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0 ; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1 diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll index ffe118b..90caee3 100644 --- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll +++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll @@ -63,7 +63,7 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label 
%[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 @@ -181,7 +181,7 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1 ; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2 ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48 @@ -253,7 +253,7 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2 ; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]] @@ -324,7 +324,7 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]] ; CHECK: [[ELSE]]: ; CHECK-NEXT: br label %[[LOOP_LATCH]] diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll index 8e272de..a3a4c29 100644 --- a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll @@ -34,7 +34,7 @@ define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[WHILE_BODY:%.*]] ; CHECK: while.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll index b2acc64..77f2fc5 100644 --- a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll +++ b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll @@ -96,17 +96,17 @@ define void @integer_induction_wraps_scev_predicate_known(i32 %x, ptr %call, ptr ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP0]], 4 -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]] -; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP3]] ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]] -; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP5]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]] +; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP4]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP0]], 4 +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP5]] ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll index cfc9bb2..03b3ff2 100644 --- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll @@ -42,8 +42,8 @@ define i32 @test(i64 %N, i32 %x) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[EXTRA_ITER]], [[LOOP_PREHEADER]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10 ; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1 @@ -98,8 +98,8 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: 
[[IV_NEXT]] = add i32 [[IV]], -1 ; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0 diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll index bf86cbd..6052224 100644 --- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll @@ -47,8 +47,8 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[PH]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4 ; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]] ; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4 diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll index eefa3da..e7b243e 100644 --- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll +++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll @@ -29,8 +29,8 @@ define float @pr70988() { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 @@ -64,8 +64,8 @@ define float @pr70988() { ; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] ; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] ; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00 ; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1 ; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021 @@ -133,8 +133,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[RDX:%.*]] = phi float [ 
[[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 @@ -185,8 +185,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) { ; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] ; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ] ; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1 ; CHECK-ALM-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]] ; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4 @@ -243,8 +243,8 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 @@ -279,8 +279,8 @@ define float @fadd_reduction_with_live_in(float %inc) { ; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ] ; CHECK-ALM-NEXT: br label [[LOOP:%.*]] ; CHECK-ALM: loop: -; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ] ; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]] ; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000 diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll index 3cf8b3f..9f33db8 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll @@ -58,7 +58,7 @@ define i32 @test(ptr %vf1, i64 %n) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ 
[[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ] ; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8 diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll index efc2b8d..ac15787 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll @@ -38,7 +38,7 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -99,7 +99,7 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -160,7 +160,7 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -221,7 +221,7 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -282,7 +282,7 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; 
CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -343,7 +343,7 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -404,7 +404,7 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -465,7 +465,7 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -526,7 +526,7 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i256 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i256 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]] ; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1 diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll index 222c1ee..6f4bb1d 100644 --- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll @@ -59,7 +59,7 @@ define void @tail_fold_switch(ptr %dst, i32 %0) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [ ; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]] ; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]] diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll index 13d5be1..d39a146 100644 --- 
a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll +++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll @@ -60,7 +60,7 @@ define void @VF1-VPlanExe(ptr %dst) { ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -140,7 +140,7 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) { ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ; CHECK: for.body: -; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH]] ] ; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8 ; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1 ; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]] diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll index 85cf925..a35e763 100644 --- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll +++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll @@ -302,7 +302,7 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) { ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] ; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] ; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L]], 10 diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll index 59c76ae..983f327 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll @@ -224,7 +224,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ] ; VF8UF1-NEXT: br label %[[LOOP:.*]] ; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] ; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 ; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 @@ -368,7 +368,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ] ; VF8UF2-NEXT: br label %[[LOOP:.*]] ; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF8UF2-NEXT: [[GEP_DST:%.*]] = 
getelementptr i16, ptr [[DST]], i64 [[IV]] ; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2 ; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 @@ -511,7 +511,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5, ; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ] ; VF16UF1-NEXT: br label %[[LOOP:.*]] ; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]] ; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2 ; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 @@ -797,7 +797,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; VF8UF1-NEXT: br label %[[LOOP:.*]] ; VF8UF1: [[LOOP]]: -; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] ; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] ; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 @@ -994,7 +994,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; VF8UF2-NEXT: br label %[[LOOP:.*]] ; VF8UF2: [[LOOP]]: -; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] ; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] ; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1 @@ -1190,7 +1190,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) { ; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ] ; VF16UF1-NEXT: br label %[[LOOP:.*]] ; VF16UF1: [[LOOP]]: -; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]] ; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]] ; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1 diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll new file mode 100644 index 0000000..301e5da --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll @@ -0,0 +1,279 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + +define void @ldexp_f32i32(ptr %x, ptr %y, i32 %exp) { +; CHECK-LABEL: @ldexp_f32i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load 
float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP]]) +; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L4]], i32 [[EXP]]) +; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L6]], i32 [[EXP]]) +; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1 +; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2 +; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3 +; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp) + %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp) + %l5 = tail call float @llvm.ldexp.f32.i32(float %l4, i32 %exp) + %l7 = tail call float @llvm.ldexp.f32.i32(float %l6, i32 %exp) + store float %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1 + store float %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2 + store float %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3 + store float %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @ldexp_f64i32(ptr %x, ptr %y, i32 %exp) { +; CHECK-LABEL: @ldexp_f64i32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP]]) +; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L4]], i32 [[EXP]]) +; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L6]], i32 [[EXP]]) +; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1 +; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2 +; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3 +; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, 
ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp) + %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp) + %l5 = tail call double @llvm.ldexp.f64.i32(double %l4, i32 %exp) + %l7 = tail call double @llvm.ldexp.f64.i32(double %l6, i32 %exp) + store double %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1 + store double %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2 + store double %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3 + store double %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @ldexp_f32i64(ptr %x, ptr %y, i64 %exp) { +; CHECK-LABEL: @ldexp_f32i64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L0]], i64 [[EXP:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L2]], i64 [[EXP]]) +; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP]]) +; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP]]) +; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1 +; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2 +; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3 +; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call float @llvm.ldexp.f32.i64(float %l0, i64 %exp) + %l3 = tail call float @llvm.ldexp.f32.i64(float %l2, i64 %exp) + %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp) + %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp) + store float %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1 + store float %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2 + store float %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3 + store float %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @ldexp_f64i64(ptr %x, ptr %y, i64 %exp) { +; CHECK-LABEL: @ldexp_f64i64( +; CHECK-NEXT: entry: +; CHECK-NEXT: 
[[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L0]], i64 [[EXP:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L2]], i64 [[EXP]]) +; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP]]) +; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP]]) +; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1 +; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2 +; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3 +; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call double @llvm.ldexp.f64.i64(double %l0, i64 %exp) + %l3 = tail call double @llvm.ldexp.f64.i64(double %l2, i64 %exp) + %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp) + %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp) + store double %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1 + store double %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2 + store double %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3 + store double %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @ldexp_f32i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) { +; CHECK-LABEL: @ldexp_f32i32_i64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP32:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP32]]) +; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP64:%.*]]) +; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP64]]) +; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = 
getelementptr inbounds float, ptr [[Y]], i64 1 +; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2 +; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3 +; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp32) + %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp32) + %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp64) + %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp64) + store float %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1 + store float %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2 + store float %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3 + store float %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @ldexp_f64_i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) { +; CHECK-LABEL: @ldexp_f64_i32_i64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP32:%.*]]) +; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP32]]) +; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP64:%.*]]) +; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP64]]) +; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1 +; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2 +; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3 +; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp32) + %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp32) + %l5 = tail call double 
@llvm.ldexp.f64.i64(double %l4, i64 %exp64) + %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp64) + store double %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1 + store double %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2 + store double %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3 + store double %l7, ptr %arrayidx2.3, align 4 + ret void +} + +declare float @llvm.ldexp.f32.i32(float, i32) +declare double @llvm.ldexp.f64.i32(double, i32) +declare float @llvm.ldexp.f32.i64(float, i64) +declare double @llvm.ldexp.f64.i64(double, i64) diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll new file mode 100644 index 0000000..07a3fe7 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll @@ -0,0 +1,280 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + +define void @lround_i32f32(ptr %x, ptr %y, i32 %n) { +; CHECK-LABEL: @lround_i32f32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]]) +; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1 +; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2 +; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3 +; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call i32 @llvm.lround.i32.f32(float %l0) + %l3 = tail call i32 @llvm.lround.i32.f32(float %l2) + %l5 = tail call i32 @llvm.lround.i32.f32(float %l4) + %l7 = tail call i32 @llvm.lround.i32.f32(float %l6) + store i32 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1 + store i32 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2 + store i32 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3 + store i32 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define 
void @lround_i32f64(ptr %x, ptr %y, i32 %n) { +; CHECK-LABEL: @lround_i32f64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]]) +; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1 +; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2 +; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3 +; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call i32 @llvm.lround.i32.f64(double %l0) + %l3 = tail call i32 @llvm.lround.i32.f64(double %l2) + %l5 = tail call i32 @llvm.lround.i32.f64(double %l4) + %l7 = tail call i32 @llvm.lround.i32.f64(double %l6) + store i32 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1 + store i32 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2 + store i32 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3 + store i32 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @lround_i64f32(ptr %x, ptr %y, i64 %n) { +; CHECK-LABEL: @lround_i64f32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]]) +; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1 +; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: 
[[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2 +; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3 +; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call i64 @llvm.lround.i64.f32(float %l0) + %l3 = tail call i64 @llvm.lround.i64.f32(float %l2) + %l5 = tail call i64 @llvm.lround.i64.f32(float %l4) + %l7 = tail call i64 @llvm.lround.i64.f32(float %l6) + store i64 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1 + store i64 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2 + store i64 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3 + store i64 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @lround_i64f64(ptr %x, ptr %y, i64 %n) { +; CHECK-LABEL: @lround_i64f64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]]) +; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1 +; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2 +; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3 +; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call i64 @llvm.lround.i64.f64(double %l0) + %l3 = tail call i64 @llvm.lround.i64.f64(double %l2) + %l5 = tail call i64 @llvm.lround.i64.f64(double %l4) + %l7 = tail call i64 @llvm.lround.i64.f64(double %l6) + store i64 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1 + store i64 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2 + store i64 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds 
i64, ptr %y, i64 3 + store i64 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @llround_i64f32(ptr %x, ptr %y, i64 %n) { +; CHECK-LABEL: @llround_i64f32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]]) +; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1 +; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2 +; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3 +; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load float, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1 + %l2 = load float, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2 + %l4 = load float, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3 + %l6 = load float, ptr %arrayidx.3, align 4 + %l1 = tail call i64 @llvm.llround.i64.f32(float %l0) + %l3 = tail call i64 @llvm.llround.i64.f32(float %l2) + %l5 = tail call i64 @llvm.llround.i64.f32(float %l4) + %l7 = tail call i64 @llvm.llround.i64.f32(float %l6) + store i64 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1 + store i64 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2 + store i64 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3 + store i64 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +define void @llround_i64f64(ptr %x, ptr %y, i64 %n) { +; CHECK-LABEL: @llround_i64f64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1 +; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2 +; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3 +; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]]) +; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]]) +; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]]) +; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]]) +; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1 +; 
CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2 +; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3 +; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4 +; CHECK-NEXT: ret void +; +entry: + %l0 = load double, ptr %x, align 4 + %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1 + %l2 = load double, ptr %arrayidx.1, align 4 + %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2 + %l4 = load double, ptr %arrayidx.2, align 4 + %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3 + %l6 = load double, ptr %arrayidx.3, align 4 + %l1 = tail call i64 @llvm.llround.i64.f64(double %l0) + %l3 = tail call i64 @llvm.llround.i64.f64(double %l2) + %l5 = tail call i64 @llvm.llround.i64.f64(double %l4) + %l7 = tail call i64 @llvm.llround.i64.f64(double %l6) + store i64 %l1, ptr %y, align 4 + %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1 + store i64 %l3, ptr %arrayidx2.1, align 4 + %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2 + store i64 %l5, ptr %arrayidx2.2, align 4 + %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3 + store i64 %l7, ptr %arrayidx2.3, align 4 + ret void +} + +declare i32 @llvm.lround.i32.f32(float) +declare i64 @llvm.lround.i64.f32(float) +declare i64 @llvm.lround.i64.f64(double) +declare i64 @llvm.llround.i64.f32(float) +declare i64 @llvm.llround.i64.f64(double) diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll new file mode 100644 index 0000000..645dbc4 --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll @@ -0,0 +1,741 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 + +; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s + +define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { +; CHECK-LABEL: define void @const_stride_1_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , 
align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 16 + store i8 %load1, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { +; CHECK-LABEL: define void @const_stride_1_with_reordering( +; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12 + %gep_l13 = getelementptr 
inbounds i8, ptr %pl, i64 13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + ; NOTE: value from %load1 is stored in %gep_s0 + store i8 %load1, ptr %gep_s0, align 16 + store i8 %load0, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + + +define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { +; CHECK-LABEL: define void @const_stride_2_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison) +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds 
i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 16 + store i8 %load1, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { +; CHECK-LABEL: define void @const_stride_2_with_reordering( +; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> 
<i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison) +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> +; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 2, i32 0, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load1, ptr %gep_s0, align 16 + store i8 %load0, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 
%load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @rt_stride_1_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0 +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]] +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16) +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %stride0 = mul nsw i64 %stride, 0 + %stride1 = mul nsw i64 %stride, 1 + %stride2 = mul nsw i64 %stride, 2 + %stride3 = mul nsw i64 %stride, 3 + %stride4 = mul nsw i64 %stride, 4 + %stride5 = mul nsw i64 %stride, 5 + %stride6 = mul nsw i64 %stride, 6 + %stride7 = mul nsw i64 %stride, 7 + %stride8 = mul nsw i64 %stride, 8 + %stride9 = mul nsw i64 %stride, 9 + %stride10 = mul nsw i64 %stride, 10 + %stride11 = mul nsw i64 %stride, 11 + %stride12 = mul nsw i64 %stride, 12 + %stride13 = mul nsw i64 %stride, 13 + %stride14 = mul nsw i64 %stride, 14 + %stride15 = mul nsw i64 %stride, 15 + + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 
1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 16 + store i8 %load1, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @rt_stride_1_with_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0 +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]] +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16) +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> +; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %stride0 = mul nsw i64 %stride, 0 + %stride1 = mul nsw i64 %stride, 1 + %stride2 = mul nsw i64 %stride, 2 + %stride3 = mul nsw i64 %stride, 3 + %stride4 = mul nsw i64 %stride, 4 + %stride5 = mul nsw i64 %stride, 5 + %stride6 = mul nsw i64 %stride, 6 + %stride7 = mul nsw i64 %stride, 7 + %stride8 = mul nsw i64 %stride, 8 + %stride9 = mul nsw i64 %stride, 9 + %stride10 = mul nsw i64 %stride, 10 + %stride11 = mul nsw i64 %stride, 11 + %stride12 = mul nsw i64 %stride, 12 + %stride13 = mul nsw i64 %stride, 13 + %stride14 = mul nsw i64 %stride, 14 + %stride15 = mul nsw i64 %stride, 15 + + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 
%stride8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load1, ptr %gep_s0, align 16 + store i8 %load0, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +; TODO: We want to generate this code: +; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 +; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 +; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4) +; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8> +; store <16 x i8> %bitcast_, ptr %gep_s0, align 16 +; ret void +; } +define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @constant_stride_widen_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 +; CHECK-NEXT: 
[[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison) +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27> +; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 9 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 10 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 11 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 17 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 18 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 19 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 25 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 16 + store i8 %load1, ptr %gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store 
i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} + +; TODO: We want to generate this code: +; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 +; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 +; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4) +; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8> +; store <16 x i8> %bitcast_, ptr %gep_s0, align 16 +; ret void +; } +define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) { +; CHECK-LABEL: define void @rt_stride_widen_no_reordering( +; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[OFFSET0:%.*]] = mul nsw i64 [[STRIDE]], 0 +; CHECK-NEXT: [[OFFSET4:%.*]] = mul nsw i64 [[STRIDE]], 1 +; CHECK-NEXT: [[OFFSET8:%.*]] = mul nsw i64 [[STRIDE]], 2 +; CHECK-NEXT: [[OFFSET12:%.*]] = mul nsw i64 [[STRIDE]], 3 +; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET0]] +; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET4]] +; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]] +; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]] +; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 +; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16 +; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16 +; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16 +; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16 +; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, 
i32 poison> +; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> +; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: ret void +; + %offset0 = mul nsw i64 %stride, 0 + %offset1 = add nsw i64 %offset0, 1 + %offset2 = add nsw i64 %offset0, 2 + %offset3 = add nsw i64 %offset0, 3 + %offset4 = mul nsw i64 %stride, 1 + %offset5 = add nsw i64 %offset4, 1 + %offset6 = add nsw i64 %offset4, 2 + %offset7 = add nsw i64 %offset4, 3 + %offset8 = mul nsw i64 %stride, 2 + %offset9 = add nsw i64 %offset8, 1 + %offset10 = add nsw i64 %offset8, 2 + %offset11 = add nsw i64 %offset8, 3 + %offset12 = mul nsw i64 %stride, 3 + %offset13 = add nsw i64 %offset12, 1 + %offset14 = add nsw i64 %offset12, 2 + %offset15 = add nsw i64 %offset12, 3 + + %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0 + %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %offset1 + %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %offset2 + %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %offset3 + %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %offset4 + %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %offset5 + %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %offset6 + %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %offset7 + %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %offset8 + %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %offset9 + %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %offset10 + %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %offset11 + %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %offset12 + %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %offset13 + %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14 + %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15 + + %load0 = load i8, ptr %gep_l0 , align 16 + %load1 = load i8, ptr %gep_l1 , align 16 + %load2 = load i8, ptr %gep_l2 , align 16 + %load3 = load i8, ptr %gep_l3 , align 16 + %load4 = load i8, ptr %gep_l4 , align 16 + %load5 = load i8, ptr %gep_l5 , align 16 + %load6 = load i8, ptr %gep_l6 , align 16 + %load7 = load i8, ptr %gep_l7 , align 16 + %load8 = load i8, ptr %gep_l8 , align 16 + %load9 = load i8, ptr %gep_l9 , align 16 + %load10 = load i8, ptr %gep_l10, align 16 + %load11 = load i8, ptr %gep_l11, align 16 + %load12 = load i8, ptr %gep_l12, align 16 + %load13 = load i8, ptr %gep_l13, align 16 + %load14 = load i8, ptr %gep_l14, align 16 + %load15 = load i8, ptr %gep_l15, align 16 + + %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 + %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 + %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2 + %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3 + %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4 + %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5 + %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6 + %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7 + %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8 + %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9 + %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10 + %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11 + %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12 + %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13 + %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 + %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 + + store i8 %load0, ptr %gep_s0, align 16 + store i8 %load1, ptr 
%gep_s1, align 16 + store i8 %load2, ptr %gep_s2, align 16 + store i8 %load3, ptr %gep_s3, align 16 + store i8 %load4, ptr %gep_s4, align 16 + store i8 %load5, ptr %gep_s5, align 16 + store i8 %load6, ptr %gep_s6, align 16 + store i8 %load7, ptr %gep_s7, align 16 + store i8 %load8, ptr %gep_s8, align 16 + store i8 %load9, ptr %gep_s9, align 16 + store i8 %load10, ptr %gep_s10, align 16 + store i8 %load11, ptr %gep_s11, align 16 + store i8 %load12, ptr %gep_s12, align 16 + store i8 %load13, ptr %gep_s13, align 16 + store i8 %load14, ptr %gep_s14, align 16 + store i8 %load15, ptr %gep_s15, align 16 + + ret void +} diff --git a/llvm/test/Transforms/Scalarizer/intrinsics.ll b/llvm/test/Transforms/Scalarizer/intrinsics.ll index cee44ef..070c765 100644 --- a/llvm/test/Transforms/Scalarizer/intrinsics.ll +++ b/llvm/test/Transforms/Scalarizer/intrinsics.ll @@ -8,6 +8,7 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>) declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>) +declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>) ; Ternary fp declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) @@ -32,6 +33,8 @@ declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>) ; Unary fp operand, int return type declare <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float>) declare <2 x i32> @llvm.llrint.v2i32.v2f32(<2 x float>) +declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>) +declare <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float>) ; Bool return type, overloaded on fp operand type declare <2 x i1> @llvm.is.fpclass(<2 x float>, i32) @@ -159,6 +162,22 @@ define <2 x float> @scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 { ret <2 x float> %powi } +define <2 x float> @scalarize_ldexp_v2f32(<2 x float> %x, <2 x i32> %y) #0 { +; CHECK-LABEL: @scalarize_ldexp_v2f32( +; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0 +; CHECK-NEXT: [[Y:%.*]] = extractelement <2 x i32> [[Y1:%.*]], i64 0 +; CHECK-NEXT: [[POWI_I0:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I0]], i32 [[Y]]) +; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1 +; CHECK-NEXT: [[Y_I1:%.*]] = extractelement <2 x i32> [[Y1]], i64 1 +; CHECK-NEXT: [[POWI_I1:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I1]], i32 [[Y_I1]]) +; CHECK-NEXT: [[POWI_UPTO0:%.*]] = insertelement <2 x float> poison, float [[POWI_I0]], i64 0 +; CHECK-NEXT: [[POWI:%.*]] = insertelement <2 x float> [[POWI_UPTO0]], float [[POWI_I1]], i64 1 +; CHECK-NEXT: ret <2 x float> [[POWI]] +; + %powi = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> %y) + ret <2 x float> %powi +} + define <2 x i32> @scalarize_smul_fix_sat_v2i32(<2 x i32> %x) #0 { ; CHECK-LABEL: @scalarize_smul_fix_sat_v2i32( ; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0 @@ -243,6 +262,34 @@ define <2 x i32> @scalarize_llrint(<2 x float> %x) #0 { ret <2 x i32> %rnd } +define <2 x i32> @scalarize_lround(<2 x float> %x) #0 { +; CHECK-LABEL: @scalarize_lround( +; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0 +; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I0]]) +; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1 +; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I1]]) +; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0 +; CHECK-NEXT: 
[[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1 +; CHECK-NEXT: ret <2 x i32> [[RND]] +; + %rnd = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x) + ret <2 x i32> %rnd +} + +define <2 x i32> @scalarize_llround(<2 x float> %x) #0 { +; CHECK-LABEL: @scalarize_llround( +; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0 +; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I0]]) +; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1 +; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I1]]) +; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0 +; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1 +; CHECK-NEXT: ret <2 x i32> [[RND]] +; + %rnd = call <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float> %x) + ret <2 x i32> %rnd +} + define <2 x i1> @scalarize_is_fpclass(<2 x float> %x) #0 { ; CHECK-LABEL: @scalarize_is_fpclass( ; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0