author     Florian Hahn <flo@fhahn.com>   2022-01-07 22:06:22 +0000
committer  Florian Hahn <flo@fhahn.com>   2022-01-08 15:21:57 +0000
commit     daa5e263129803179333862ffb760fef9f05be27 (patch)
tree       7bec3f27a6a483350dc179e917d981ec9d808bcc
parent     75d8507e456f3de82bd614625803a7f3555d89d9 (diff)
[LV] Make tests more robust by removing undef.
Replace some uses of undef in the tests. The undef values make the runtime
checks trivially foldable or removable, which defeats the purpose of the tests.
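For illustration only (not part of the commit): a minimal LLVM IR sketch of the pattern being fixed, mirroring @foo from pr41179.ll; the function and value names below are hypothetical. When %start and %end are function arguments, the loop's exit count is unknown at compile time, so the vectorizer must emit a real trip-count computation and minimum-iteration check. If i8* undef is substituted for the arguments, those checks can fold to constants and the test stops exercising them.

; Sketch (assumed names): the trip count depends on %start/%end, so the
; vectorizer has to keep its minimum-iteration check. Replacing either
; argument with i8* undef lets the check fold away trivially.
define void @walk(i8* %start, i8* %end) {
entry:
  br label %while.body

while.body:                                       ; counts down from 0
  %count = phi i32 [ %add, %while.body ], [ 0, %entry ]
  %add = add nsw i32 -1, %count
  %G = getelementptr i8, i8* %end, i32 %add
  %cmp = icmp ult i8* %start, %G
  br i1 %cmp, label %while.body, label %while.end

while.end:
  ret void
}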
-rw-r--r--  llvm/test/Transforms/LoopVectorize/PowerPC/pr41179.ll |  42
-rw-r--r--  llvm/test/Transforms/LoopVectorize/if-pred-stores.ll  | 203
2 files changed, 135 insertions, 110 deletions
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/pr41179.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/pr41179.ll
index 09fb7d8..5b35ead 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/pr41179.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/pr41179.ll
@@ -1,33 +1,37 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -S -loop-vectorize -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s
-define void @foo() {
+define void @foo(i8* %start, i8* %end) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 undef, i64 undef)
-; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[UMIN]] to i32
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 undef, [[TMP0]]
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 2
+; CHECK-NEXT:    [[START2:%.*]] = ptrtoint i8* [[START:%.*]] to i64
+; CHECK-NEXT:    [[END1:%.*]] = ptrtoint i8* [[END:%.*]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[END1]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[END1]], -1
+; CHECK-NEXT:    [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[START2]], i64 [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[UMIN]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 2
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
-; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], 2
-; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP3]], 2
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP3]], [[N_MOD_VF]]
 ; CHECK-NEXT:    [[IND_END:%.*]] = sub i32 0, [[N_VEC]]
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = sub i32 0, [[INDEX]]
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add i32 [[OFFSET_IDX]], 0
-; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i32 [[OFFSET_IDX]], -1
-; CHECK-NEXT:    [[TMP2:%.*]] = add nsw i32 -1, [[INDUCTION]]
-; CHECK-NEXT:    [[TMP3:%.*]] = add nsw i32 -1, [[INDUCTION1]]
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i8, i8* undef, i32 [[TMP2]]
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, i8* undef, i32 [[TMP3]]
+; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i32 [[OFFSET_IDX]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = add nsw i32 -1, [[INDUCTION]]
+; CHECK-NEXT:    [[TMP5:%.*]] = add nsw i32 -1, [[INDUCTION3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[END]], i32 [[TMP4]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i8, i8* [[END]], i32 [[TMP5]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK:       middle.block:
-; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP3]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[CMP_N]], label [[WHILE_END_LOOPEXIT:%.*]], label [[SCALAR_PH]]
 ; CHECK:       scalar.ph:
 ; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -35,8 +39,8 @@ define void @foo() {
 ; CHECK:       while.body:
 ; CHECK-NEXT:    [[COUNT_09:%.*]] = phi i32 [ [[ADD:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
 ; CHECK-NEXT:    [[ADD]] = add nsw i32 -1, [[COUNT_09]]
-; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* undef, i32 [[ADD]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8* undef, [[G]]
+; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, i8* [[END]], i32 [[ADD]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8* [[START]], [[G]]
 ; CHECK-NEXT:    br i1 [[CMP]], label [[WHILE_BODY]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP2:![0-9]+]]
 ; CHECK:       while.end.loopexit:
 ; CHECK-NEXT:    ret void
@@ -47,8 +51,8 @@ entry:
 while.body: ; preds = %while.body, %entry
   %count.09 = phi i32 [ %add, %while.body ], [ 0, %entry ]
   %add = add nsw i32 -1, %count.09
-  %G = getelementptr i8, i8* undef, i32 %add
-  %cmp = icmp ult i8* undef, %G
+  %G = getelementptr i8, i8* %end, i32 %add
+  %cmp = icmp ult i8* %start, %G
   br i1 %cmp, label %while.body, label %while.end.loopexit
 
 while.end.loopexit: ; preds = %while.body
diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
index 360f58b..65d24b0 100644
--- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -192,61 +192,65 @@ for.end:
 ; vectorized loop body.
 ; PR18724
-define void @bug18724(i1 %cond) {
+define void @bug18724(i1 %cond, [768 x i32]* %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
 ; UNROLL-LABEL: @bug18724(
 ; UNROLL-NEXT:  entry:
 ; UNROLL-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
 ; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP0]])
-; UNROLL-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 undef, i32 0)
-; UNROLL-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], undef
-; UNROLL-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; UNROLL-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
-; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
+; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
+; UNROLL-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
+; UNROLL-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
+; UNROLL-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; UNROLL-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
 ; UNROLL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; UNROLL:       vector.ph:
-; UNROLL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
-; UNROLL-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; UNROLL-NEXT:    [[IND_END:%.*]] = add i64 undef, [[N_VEC]]
+; UNROLL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
+; UNROLL-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
+; UNROLL-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
 ; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; UNROLL:       vector.body:
 ; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
-; UNROLL-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
+; UNROLL-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
 ; UNROLL-NEXT:    [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI5:%.*]], [[PRED_STORE_CONTINUE4]] ]
-; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = add i64 undef, [[INDEX]]
+; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
 ; UNROLL-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
 ; UNROLL-NEXT:    [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 1
-; UNROLL-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION]]
-; UNROLL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION1]]
-; UNROLL-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 4
+; UNROLL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[INDUCTION]]
+; UNROLL-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDUCTION1]]
 ; UNROLL-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
-; UNROLL-NEXT:    br i1 undef, label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE4]]
+; UNROLL-NEXT:    [[TMP8:%.*]] = load i32, i32* [[TMP6]], align 4
+; UNROLL-NEXT:    br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE4]]
 ; UNROLL:       pred.store.if:
-; UNROLL-NEXT:    store i32 2, i32* [[TMP4]], align 4
+; UNROLL-NEXT:    store i32 2, i32* [[TMP5]], align 4
+; UNROLL-NEXT:    store i32 2, i32* [[TMP6]], align 4
 ; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE4]]
 ; UNROLL:       pred.store.continue4:
-; UNROLL-NEXT:    [[TMP8:%.*]] = add i32 [[VEC_PHI]], 1
-; UNROLL-NEXT:    [[TMP9:%.*]] = add i32 [[VEC_PHI2]], 1
-; UNROLL-NEXT:    [[PREDPHI]] = select i1 undef, i32 [[VEC_PHI]], i32 [[TMP8]]
-; UNROLL-NEXT:    [[PREDPHI5]] = select i1 undef, i32 [[VEC_PHI2]], i32 [[TMP9]]
+; UNROLL-NEXT:    [[TMP9:%.*]] = add i32 [[VEC_PHI]], 1
+; UNROLL-NEXT:    [[TMP10:%.*]] = add i32 [[VEC_PHI2]], 1
+; UNROLL-NEXT:    [[TMP11:%.*]] = xor i1 [[COND_2]], true
+; UNROLL-NEXT:    [[TMP12:%.*]] = xor i1 [[COND_2]], true
+; UNROLL-NEXT:    [[PREDPHI]] = select i1 [[TMP11]], i32 [[VEC_PHI]], i32 [[TMP9]]
+; UNROLL-NEXT:    [[PREDPHI5]] = select i1 [[TMP12]], i32 [[VEC_PHI2]], i32 [[TMP10]]
 ; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; UNROLL-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; UNROLL-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; UNROLL-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; UNROLL-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; UNROLL:       middle.block:
 ; UNROLL-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI5]], [[PREDPHI]]
-; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; UNROLL-NEXT:    [[TMP11:%.*]] = xor i1 [[CMP_N]], true
-; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP11]])
+; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
+; UNROLL-NEXT:    [[TMP14:%.*]] = xor i1 [[CMP_N]], true
+; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP14]])
 ; UNROLL-NEXT:    br label [[SCALAR_PH]]
 ; UNROLL:       scalar.ph:
-; UNROLL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ undef, [[ENTRY:%.*]] ]
-; UNROLL-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ undef, [[ENTRY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
+; UNROLL-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[ENTRY]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
 ; UNROLL-NEXT:    br label [[FOR_BODY14:%.*]]
 ; UNROLL:       for.body14:
 ; UNROLL-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
 ; UNROLL-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; UNROLL-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
+; UNROLL-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
 ; UNROLL-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
-; UNROLL-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
+; UNROLL-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
 ; UNROLL:       if.then18:
 ; UNROLL-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
 ; UNROLL-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
@@ -265,59 +269,62 @@ define void @bug18724(i1 %cond) {
 ; UNROLL-NOSIMPLIFY:       for.body9:
 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND:%.*]], label [[FOR_INC26:%.*]], label [[FOR_BODY14_PREHEADER:%.*]]
 ; UNROLL-NOSIMPLIFY:       for.body14.preheader:
-; UNROLL-NOSIMPLIFY-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 undef, i32 0)
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = sub i32 [[SMAX]], undef
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; UNROLL-NOSIMPLIFY-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 2
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = trunc i64 [[V_1:%.*]] to i32
+; UNROLL-NOSIMPLIFY-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 0)
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], [[TMP0]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
+; UNROLL-NOSIMPLIFY-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; UNROLL-NOSIMPLIFY:       vector.ph:
-; UNROLL-NOSIMPLIFY-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 2
-; UNROLL-NOSIMPLIFY-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; UNROLL-NOSIMPLIFY-NEXT:    [[IND_END:%.*]] = add i64 undef, [[N_VEC]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
+; UNROLL-NOSIMPLIFY-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; UNROLL-NOSIMPLIFY:       vector.body:
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
-; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
+; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE4]] ]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI2:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI5:%.*]], [[PRED_STORE_CONTINUE4]] ]
-; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = add i64 undef, [[INDEX]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION:%.*]] = add i64 [[OFFSET_IDX]], 0
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDUCTION1:%.*]] = add i64 [[OFFSET_IDX]], 1
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION]]
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDUCTION1]]
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 4
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[INDUCTION]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDUCTION1]]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP4]], align 4
-; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 4
+; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; UNROLL-NOSIMPLIFY:       pred.store.if:
-; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP3]], align 4
+; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP4]], align 4
 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
 ; UNROLL-NOSIMPLIFY:       pred.store.continue:
-; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
+; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
 ; UNROLL-NOSIMPLIFY:       pred.store.if3:
-; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP4]], align 4
+; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[TMP5]], align 4
 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE4]]
 ; UNROLL-NOSIMPLIFY:       pred.store.continue4:
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = add i32 [[VEC_PHI]], 1
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = add i32 [[VEC_PHI2]], 1
-; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI]] = select i1 undef, i32 [[VEC_PHI]], i32 [[TMP7]]
-; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI5]] = select i1 undef, i32 [[VEC_PHI2]], i32 [[TMP8]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = add i32 [[VEC_PHI]], 1
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = add i32 [[VEC_PHI2]], 1
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP10:%.*]] = xor i1 [[COND_2]], true
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP11:%.*]] = xor i1 [[COND_2]], true
+; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI]] = select i1 [[TMP10]], i32 [[VEC_PHI]], i32 [[TMP8]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI5]] = select i1 [[TMP11]], i32 [[VEC_PHI2]], i32 [[TMP9]]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; UNROLL-NOSIMPLIFY:       middle.block:
 ; UNROLL-NOSIMPLIFY-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI5]], [[PREDPHI]]
-; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
 ; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_INC26_LOOPEXIT:%.*]], label [[SCALAR_PH]]
 ; UNROLL-NOSIMPLIFY:       scalar.ph:
-; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ undef, [[FOR_BODY14_PREHEADER]] ]
-; UNROLL-NOSIMPLIFY-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ undef, [[FOR_BODY14_PREHEADER]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
+; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[FOR_BODY14_PREHEADER]] ]
+; UNROLL-NOSIMPLIFY-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[FOR_BODY14_PREHEADER]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
 ; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY14:%.*]]
 ; UNROLL-NOSIMPLIFY:       for.body14:
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
+; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
 ; UNROLL-NOSIMPLIFY-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
-; UNROLL-NOSIMPLIFY-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
+; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
 ; UNROLL-NOSIMPLIFY:       if.then18:
 ; UNROLL-NOSIMPLIFY-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
 ; UNROLL-NOSIMPLIFY-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
@@ -339,53 +346,67 @@ define void @bug18724(i1 %cond) {
 ; VEC-NEXT:  entry:
 ; VEC-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
 ; VEC-NEXT:    call void @llvm.assume(i1 [[TMP0]])
-; VEC-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 undef, i32 0)
-; VEC-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], undef
-; VEC-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
-; VEC-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
-; VEC-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
+; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
+; VEC-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
+; VEC-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
+; VEC-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; VEC-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; VEC-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
 ; VEC-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; VEC:       vector.ph:
-; VEC-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
-; VEC-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
-; VEC-NEXT:    [[IND_END:%.*]] = add i64 undef, [[N_VEC]]
+; VEC-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
+; VEC-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
+; VEC-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
+; VEC-NEXT:    [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0
+; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND_2:%.*]], i32 0
+; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
 ; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; VEC:       vector.body:
 ; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
-; VEC-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i32> [ <i32 undef, i32 0>, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE2]] ]
-; VEC-NEXT:    [[OFFSET_IDX:%.*]] = add i64 undef, [[INDEX]]
-; VEC-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
-; VEC-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[TMP4]]
-; VEC-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
-; VEC-NEXT:    [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <2 x i32>*
-; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, <2 x i32>* [[TMP7]], align 4
-; VEC-NEXT:    br i1 undef, label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]]
+; VEC-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE2]] ]
+; VEC-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
+; VEC-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
+; VEC-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR:%.*]], i64 0, i64 [[TMP6]]
+; VEC-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
+; VEC-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <2 x i32>*
+; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, <2 x i32>* [[TMP9]], align 4
+; VEC-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 0
+; VEC-NEXT:    br i1 [[TMP10]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; VEC:       pred.store.if:
-; VEC-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[TMP4]]
-; VEC-NEXT:    store i32 2, i32* [[TMP8]], align 4
+; VEC-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[TMP6]]
+; VEC-NEXT:    store i32 2, i32* [[TMP11]], align 4
+; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
+; VEC:       pred.store.continue:
+; VEC-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[BROADCAST_SPLAT]], i32 1
+; VEC-NEXT:    br i1 [[TMP12]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
+; VEC:       pred.store.if1:
+; VEC-NEXT:    [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 1
+; VEC-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[TMP13]]
+; VEC-NEXT:    store i32 2, i32* [[TMP14]], align 4
 ; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
 ; VEC:       pred.store.continue2:
-; VEC-NEXT:    [[TMP9:%.*]] = add <2 x i32> [[VEC_PHI]], <i32 1, i32 1>
-; VEC-NEXT:    [[PREDPHI]] = select <2 x i1> undef, <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP9]]
+; VEC-NEXT:    [[TMP15:%.*]] = add <2 x i32> [[VEC_PHI]], <i32 1, i32 1>
+; VEC-NEXT:    [[TMP16:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], <i1 true, i1 true>
+; VEC-NEXT:    [[PREDPHI]] = select <2 x i1> [[TMP16]], <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP15]]
 ; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; VEC-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; VEC-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VEC-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VEC-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; VEC:       middle.block:
-; VEC-NEXT:    [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI]])
-; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; VEC-NEXT:    [[TMP12:%.*]] = xor i1 [[CMP_N]], true
-; VEC-NEXT:    call void @llvm.assume(i1 [[TMP12]])
+; VEC-NEXT:    [[TMP18:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI]])
+; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
+; VEC-NEXT:    [[TMP19:%.*]] = xor i1 [[CMP_N]], true
+; VEC-NEXT:    call void @llvm.assume(i1 [[TMP19]])
 ; VEC-NEXT:    br label [[SCALAR_PH]]
 ; VEC:       scalar.ph:
-; VEC-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ undef, [[ENTRY:%.*]] ]
-; VEC-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ undef, [[ENTRY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
+; VEC-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
+; VEC-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[V_2]], [[ENTRY]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
 ; VEC-NEXT:    br label [[FOR_BODY14:%.*]]
 ; VEC:       for.body14:
 ; VEC-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
 ; VEC-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; VEC-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 [[INDVARS_IV3]]
+; VEC-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], [768 x i32]* [[PTR]], i64 0, i64 [[INDVARS_IV3]]
 ; VEC-NEXT:    [[TMP:%.*]] = load i32, i32* [[ARRAYIDX16]], align 4
-; VEC-NEXT:    br i1 undef, label [[IF_THEN18:%.*]], label [[FOR_INC23]]
+; VEC-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
 ; VEC:       if.then18:
 ; VEC-NEXT:    store i32 2, i32* [[ARRAYIDX16]], align 4
 ; VEC-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
@@ -405,11 +426,11 @@ for.body9:
   br i1 %cond, label %for.inc26, label %for.body14
 
 for.body14:
-  %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ undef, %for.body9 ]
-  %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ undef, %for.body9 ]
-  %arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 %indvars.iv3
+  %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ %v.1, %for.body9 ]
+  %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ %v.2, %for.body9 ]
+  %arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* %ptr, i64 0, i64 %indvars.iv3
   %tmp = load i32, i32* %arrayidx16, align 4
-  br i1 undef, label %if.then18, label %for.inc23
+  br i1 %cond.2, label %if.then18, label %for.inc23
 
 if.then18:
   store i32 2, i32* %arrayidx16, align 4