; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "vector.ph:" --version 5
; RUN: opt %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S | FileCheck %s
; RUN: opt %s -passes=loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -S | FileCheck %s -check-prefix=UNROLL

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

@b = common global [1000 x i32] zeroinitializer, align 16
@c = common global [1000 x i32] zeroinitializer, align 16
@a = common global [1000 x i32] zeroinitializer, align 16

; Generate min.iters.check to skip the vector loop and jump to scalar.ph
; directly when loop iteration number is less than VF * UF.
define void @foo(i64 %N) {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: i64 [[N:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i64 [[N]], 0
; CHECK-NEXT:    br i1 [[C]], label %[[LOOP_PREHEADER:.*]], [[EXIT:label %.*]]
; CHECK:       [[LOOP_PREHEADER]]:
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
;
; UNROLL-LABEL: define void @foo(
; UNROLL-SAME: i64 [[N:%.*]]) {
; UNROLL-NEXT:  [[ENTRY:.*:]]
; UNROLL-NEXT:    [[C:%.*]] = icmp sgt i64 [[N]], 0
; UNROLL-NEXT:    br i1 [[C]], label %[[LOOP_PREHEADER:.*]], [[EXIT:label %.*]]
; UNROLL:       [[LOOP_PREHEADER]]:
; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
; UNROLL-NEXT:    br i1 [[MIN_ITERS_CHECK]], [[SCALAR_PH:label %.*]], label %[[VECTOR_PH:.*]]
; UNROLL:       [[VECTOR_PH]]:
;
entry:
  %c = icmp sgt i64 %N, 0
  br i1 %c, label %loop, label %exit

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %arrayidx = getelementptr inbounds [1000 x i32], ptr @b, i64 0, i64 %iv
  %tmp = load i32, ptr %arrayidx, align 4
  %arrayidx1 = getelementptr inbounds [1000 x i32], ptr @c, i64 0, i64 %iv
  %tmp1 = load i32, ptr %arrayidx1, align 4
  %add = add nsw i32 %tmp1, %tmp
  %arrayidx2 = getelementptr inbounds [1000 x i32], ptr @a, i64 0, i64 %iv
  store i32 %add, ptr %arrayidx2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

; The loop guards (the assume on %sub > 100 plus the derived IV start) let
; SCEV prove the trip count is at least VF * UF, so no min.iters.check is
; emitted and the preheader branches straight to vector.ph.
define void @min_iters_known_via_loop_guards_add(i32 %start, i32 %end, ptr %src) {
; CHECK-LABEL: define void @min_iters_known_via_loop_guards_add(
; CHECK-SAME: i32 [[START:%.*]], i32 [[END:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[END]], [[START]]
; CHECK-NEXT:    [[PRE:%.*]] = icmp sgt i32 [[SUB]], 100
; CHECK-NEXT:    call void @llvm.assume(i1 [[PRE]])
; CHECK-NEXT:    [[ADD_1:%.*]] = add i32 [[SUB]], 1
; CHECK-NEXT:    [[IV_START:%.*]] = zext i32 [[ADD_1]] to i64
; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 101, [[IV_START]]
; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
;
; UNROLL-LABEL: define void @min_iters_known_via_loop_guards_add(
; UNROLL-SAME: i32 [[START:%.*]], i32 [[END:%.*]], ptr [[SRC:%.*]]) {
; UNROLL-NEXT:  [[ENTRY:.*:]]
; UNROLL-NEXT:    [[SUB:%.*]] = sub i32 [[END]], [[START]]
; UNROLL-NEXT:    [[PRE:%.*]] = icmp sgt i32 [[SUB]], 100
; UNROLL-NEXT:    call void @llvm.assume(i1 [[PRE]])
; UNROLL-NEXT:    [[ADD_1:%.*]] = add i32 [[SUB]], 1
; UNROLL-NEXT:    [[IV_START:%.*]] = zext i32 [[ADD_1]] to i64
; UNROLL-NEXT:    [[TMP0:%.*]] = sub i64 101, [[IV_START]]
; UNROLL-NEXT:    br label %[[VECTOR_PH:.*]]
; UNROLL:       [[VECTOR_PH]]:
;
entry:
  %sub = sub i32 %end, %start
  %pre = icmp sgt i32 %sub, 100
  call void @llvm.assume(i1 %pre)
  %add.1 = add i32 %sub, 1
  %iv.start = zext i32 %add.1 to i64
  br label %loop

loop:
  %iv = phi i64 [ %iv.start, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds i64, ptr %src, i64 %iv
  store i64 %iv, ptr %gep
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv, 100
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}