; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 4
; RUN: opt -p loop-vectorize -force-vector-interleave=4 -S %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

declare void @init_mem(ptr, i64);

; Test vectorization of a loop with an uncountable early exit (a byte-mismatch
; test between two arrays) when interleaving with -force-vector-interleave=4
; on SVE. The vector.early.exit block must recover the first mismatching lane
; across the four interleaved parts via @llvm.experimental.cttz.elts.
define i64 @same_exit_block_pre_inc_use1() #0 {
; CHECK-LABEL: define i64 @same_exit_block_pre_inc_use1(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P1:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT:    [[P2:%.*]] = alloca [1024 x i8], align 4
; CHECK-NEXT:    call void @init_mem(ptr [[P1]], i64 1024)
; CHECK-NEXT:    call void @init_mem(ptr [[P2]], i64 1024)
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 6
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 510, [[TMP1]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 64
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 510, [[TMP3]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 510, [[N_MOD_VF]]
; CHECK-NEXT:    [[INDEX_NEXT:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX1]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[OFFSET_IDX]]
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP27:%.*]] = shl nuw i64 [[TMP11]], 5
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP27]]
; CHECK-NEXT:    [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 48
; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP15]]
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT:    [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP28]], align 1
; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[OFFSET_IDX]]
; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP19:%.*]] = shl nuw i64 [[TMP18]], 4
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP29]], i64 [[TMP19]]
; CHECK-NEXT:    [[TMP21:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP22:%.*]] = shl nuw i64 [[TMP21]], 5
; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP29]], i64 [[TMP22]]
; CHECK-NEXT:    [[TMP24:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 48
; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP29]], i64 [[TMP25]]
; CHECK-NEXT:    [[WIDE_LOAD5:%.*]] = load <vscale x 16 x i8>, ptr [[TMP29]], align 1
; CHECK-NEXT:    [[WIDE_LOAD6:%.*]] = load <vscale x 16 x i8>, ptr [[TMP20]], align 1
; CHECK-NEXT:    [[WIDE_LOAD7:%.*]] = load <vscale x 16 x i8>, ptr [[TMP23]], align 1
; CHECK-NEXT:    [[WIDE_LOAD8:%.*]] = load <vscale x 16 x i8>, ptr [[TMP26]], align 1
; CHECK-NEXT:    [[TMP32:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], [[WIDE_LOAD5]]
; CHECK-NEXT:    [[TMP30:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD2]], [[WIDE_LOAD6]]
; CHECK-NEXT:    [[TMP31:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD3]], [[WIDE_LOAD7]]
; CHECK-NEXT:    [[TMP59:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD4]], [[WIDE_LOAD8]]
; CHECK-NEXT:    [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], [[TMP3]]
; CHECK-NEXT:    [[TMP37:%.*]] = freeze <vscale x 16 x i1> [[TMP32]]
; CHECK-NEXT:    [[TMP38:%.*]] = freeze <vscale x 16 x i1> [[TMP30]]
; CHECK-NEXT:    [[TMP54:%.*]] = or <vscale x 16 x i1> [[TMP37]], [[TMP38]]
; CHECK-NEXT:    [[TMP60:%.*]] = freeze <vscale x 16 x i1> [[TMP31]]
; CHECK-NEXT:    [[TMP62:%.*]] = or <vscale x 16 x i1> [[TMP54]], [[TMP60]]
; CHECK-NEXT:    [[TMP34:%.*]] = freeze <vscale x 16 x i1> [[TMP59]]
; CHECK-NEXT:    [[TMP33:%.*]] = or <vscale x 16 x i1> [[TMP62]], [[TMP34]]
; CHECK-NEXT:    [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> [[TMP33]])
; CHECK-NEXT:    [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT3]], [[N_VEC]]
; CHECK-NEXT:    [[TMP36:%.*]] = or i1 [[TMP12]], [[TMP35]]
; CHECK-NEXT:    br i1 [[TMP36]], label [[MIDDLE_SPLIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.split:
; CHECK-NEXT:    br i1 [[TMP12]], label [[VECTOR_EARLY_EXIT:%.*]], label [[LOOP_INC:%.*]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 510, [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[LOOP_END:%.*]], label [[SCALAR_PH]]
; CHECK:       vector.early.exit:
; CHECK-NEXT:    [[TMP39:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP40:%.*]] = mul nuw i64 [[TMP39]], 16
; CHECK-NEXT:    [[TMP41:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP59]], i1 true)
; CHECK-NEXT:    [[TMP42:%.*]] = mul i64 [[TMP40]], 3
; CHECK-NEXT:    [[TMP43:%.*]] = add i64 [[TMP42]], [[TMP41]]
; CHECK-NEXT:    [[TMP44:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP31]], i1 true)
; CHECK-NEXT:    [[TMP45:%.*]] = mul i64 [[TMP40]], 2
; CHECK-NEXT:    [[TMP46:%.*]] = add i64 [[TMP45]], [[TMP44]]
; CHECK-NEXT:    [[TMP47:%.*]] = icmp ne i64 [[TMP44]], [[TMP40]]
; CHECK-NEXT:    [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[TMP43]]
; CHECK-NEXT:    [[TMP49:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP30]], i1 true)
; CHECK-NEXT:    [[TMP50:%.*]] = mul i64 [[TMP40]], 1
; CHECK-NEXT:    [[TMP51:%.*]] = add i64 [[TMP50]], [[TMP49]]
; CHECK-NEXT:    [[TMP52:%.*]] = icmp ne i64 [[TMP49]], [[TMP40]]
; CHECK-NEXT:    [[TMP53:%.*]] = select i1 [[TMP52]], i64 [[TMP51]], i64 [[TMP48]]
; CHECK-NEXT:    [[TMP61:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.nxv16i1(<vscale x 16 x i1> [[TMP32]], i1 true)
; CHECK-NEXT:    [[TMP55:%.*]] = mul i64 [[TMP40]], 0
; CHECK-NEXT:    [[TMP56:%.*]] = add i64 [[TMP55]], [[TMP61]]
; CHECK-NEXT:    [[TMP57:%.*]] = icmp ne i64 [[TMP61]], [[TMP40]]
; CHECK-NEXT:    [[TMP58:%.*]] = select i1 [[TMP57]], i64 [[TMP56]], i64 [[TMP53]]
; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[INDEX1]], [[TMP58]]
; CHECK-NEXT:    [[TMP17:%.*]] = add i64 3, [[TMP16]]
; CHECK-NEXT:    br label [[LOOP_END]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT]], [[LOOP_INC]] ], [ 3, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP1:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[INDEX2:%.*]] = phi i64 [ [[INDEX_NEXT1:%.*]], [[LOOP_INC1:%.*]] ], [ [[INDEX]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX2]]
; CHECK-NEXT:    [[LD1:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX2]]
; CHECK-NEXT:    [[LD2:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT:    [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[LOOP_INC1]], label [[LOOP_END]]
; CHECK:       loop.inc:
; CHECK-NEXT:    [[INDEX_NEXT1]] = add i64 [[INDEX2]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT1]], 513
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP1]], label [[LOOP_END]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       loop.end:
; CHECK-NEXT:    [[RETVAL:%.*]] = phi i64 [ [[INDEX2]], [[LOOP1]] ], [ 67, [[LOOP_INC1]] ], [ 67, [[LOOP_INC]] ], [ [[TMP17]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT:    ret i64 [[RETVAL]]
;
entry:
  %p1 = alloca [1024 x i8]
  %p2 = alloca [1024 x i8]
  call void @init_mem(ptr %p1, i64 1024)
  call void @init_mem(ptr %p2, i64 1024)
  br label %loop

loop:
  %index = phi i64 [ %index.next, %loop.inc ], [ 3, %entry ]
  %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index
  %ld1 = load i8, ptr %arrayidx, align 1
  %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index
  %ld2 = load i8, ptr %arrayidx1, align 1
  %cmp3 = icmp eq i8 %ld1, %ld2
  br i1 %cmp3, label %loop.inc, label %loop.end

loop.inc:
  %index.next = add i64 %index, 1
  %exitcond = icmp ne i64 %index.next, 513
  br i1 %exitcond, label %loop, label %loop.end

loop.end:
  %retval = phi i64 [ %index, %loop ], [ 67, %loop.inc ]
  ret i64 %retval
}

attributes #0 = { "target-features"="+sve" vscale_range(1,16) }