; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -passes=loop-vectorize %s -mtriple=arm64-apple-iphoneos -S | FileCheck %s

; Uniform value loaded in the preheader is broadcast, sign-extended once in the
; vector preheader, and stored with VF=2 / IC=2 (trip count 20 = 5 * 4).
define void @test(ptr %dst, ptr %src) {
; CHECK-LABEL: define void @test(
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT:    br label %[[LOOP_PH:.*]]
; CHECK:       [[LOOP_PH]]:
; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[L]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP0:%.*]] = sext <2 x i32> [[BROADCAST_SPLAT]] to <2 x i64>
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i64, ptr [[TMP1]], i64 2
; CHECK-NEXT:    store <2 x i64> [[TMP0]], ptr [[TMP1]], align 8
; CHECK-NEXT:    store <2 x i64> [[TMP0]], ptr [[TMP2]], align 8
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20
; CHECK-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    br label %[[EXIT:.*]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  %l = load i32, ptr %src
  br label %loop.ph

loop.ph:
  br label %loop

loop:
  %iv = phi i64 [ 0, %loop.ph ], [ %iv.next, %loop ]
  %l.cast = sext i32 %l to i64
  %dst.idx = getelementptr i64, ptr %dst, i64 %iv
  store i64 %l.cast, ptr %dst.idx
  %iv.next = add nuw nsw i64 %iv, 1
  %cmp9.us = icmp ult i64 %iv.next, 20
  br i1 %cmp9.us, label %loop, label %exit

exit:
  ret void
}

; Test that cast context hints are
; only computed for memory operations.
; When a sext has a non-memory operand (like an add), CCH should be None.
;
; NOTE(review): the scalable-vector types inside the CHECK lines below were
; garbled during extraction; they have been reconstructed as
; <vscale x 16 x ...> (main loop, VF 16 x IC 4, cf. the nxv16i32 reduction and
; the `shl nuw i64 %vscale, 4` per-part VF) and <vscale x 8 x ...> (epilogue,
; cf. the nxv8i32 reduction and `shl nuw i64 %vscale, 3`). Re-run
; utils/update_test_checks.py to confirm the exact autogenerated output.
define i32 @sext_of_non_memory_op(ptr %src, i32 %offset, i64 %n) #0 {
; CHECK-LABEL: define i32 @sext_of_non_memory_op(
; CHECK-SAME: ptr [[SRC:%.*]], i32 [[OFFSET:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ITER_CHECK:.*]]:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 3
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; CHECK:       [[VECTOR_SCEVCHECK]]:
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[OFFSET]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[OFFSET]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i64 [[N]], 4294967295
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
; CHECK:       [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 6
; CHECK-NEXT:    [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP0]], [[TMP9]]
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw i64 [[TMP10]], 4
; CHECK-NEXT:    [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 2
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP12]]
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP26:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI2:%.*]] = phi <vscale x 16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP27:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI3:%.*]] = phi <vscale x 16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP28:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI4:%.*]] = phi <vscale x 16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP29:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP13:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[OFFSET]], [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 [[TMP14]] to i64
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = shl nuw nsw i64 [[TMP11]], 1
; CHECK-NEXT:    [[TMP18:%.*]] = mul nuw nsw i64 [[TMP11]], 3
; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP11]]
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP17]]
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP18]]
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP16]], align 1
; CHECK-NEXT:    [[WIDE_LOAD5:%.*]] = load <vscale x 16 x i8>, ptr [[TMP19]], align 1
; CHECK-NEXT:    [[WIDE_LOAD6:%.*]] = load <vscale x 16 x i8>, ptr [[TMP20]], align 1
; CHECK-NEXT:    [[WIDE_LOAD7:%.*]] = load <vscale x 16 x i8>, ptr [[TMP21]], align 1
; CHECK-NEXT:    [[TMP22:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
; CHECK-NEXT:    [[TMP23:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD5]] to <vscale x 16 x i32>
; CHECK-NEXT:    [[TMP24:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD6]] to <vscale x 16 x i32>
; CHECK-NEXT:    [[TMP25:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD7]] to <vscale x 16 x i32>
; CHECK-NEXT:    [[TMP26]] = or <vscale x 16 x i32> [[VEC_PHI]], [[TMP22]]
; CHECK-NEXT:    [[TMP27]] = or <vscale x 16 x i32> [[VEC_PHI2]], [[TMP23]]
; CHECK-NEXT:    [[TMP28]] = or <vscale x 16 x i32> [[VEC_PHI3]], [[TMP24]]
; CHECK-NEXT:    [[TMP29]] = or <vscale x 16 x i32> [[VEC_PHI4]], [[TMP25]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
; CHECK-NEXT:    [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[BIN_RDX:%.*]] = or <vscale x 16 x i32> [[TMP27]], [[TMP26]]
; CHECK-NEXT:    [[BIN_RDX8:%.*]] = or <vscale x 16 x i32> [[TMP28]], [[BIN_RDX]]
; CHECK-NEXT:    [[BIN_RDX9:%.*]] = or <vscale x 16 x i32> [[TMP29]], [[BIN_RDX8]]
; CHECK-NEXT:    [[TMP31:%.*]] = call i32 @llvm.vector.reduce.or.nxv16i32(<vscale x 16 x i32> [[BIN_RDX9]])
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; CHECK:       [[VEC_EPILOG_ITER_CHECK]]:
; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[TMP2]]
; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF4:![0-9]+]]
; CHECK:       [[VEC_EPILOG_PH]]:
; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP31]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT:    [[TMP32:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP33:%.*]] = shl nuw i64 [[TMP32]], 3
; CHECK-NEXT:    [[N_MOD_VF10:%.*]] = urem i64 [[TMP0]], [[TMP33]]
; CHECK-NEXT:    [[N_VEC11:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF10]]
; CHECK-NEXT:    [[TMP34:%.*]] = insertelement <vscale x 8 x i32> zeroinitializer, i32 [[BC_MERGE_RDX]], i32 0
; CHECK-NEXT:    br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
; CHECK:       [[VEC_EPILOG_VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT15:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI13:%.*]] = phi <vscale x 8 x i32> [ [[TMP34]], %[[VEC_EPILOG_PH]] ], [ [[TMP40:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP35:%.*]] = trunc i64 [[INDEX12]] to i32
; CHECK-NEXT:    [[TMP36:%.*]] = add i32 [[OFFSET]], [[TMP35]]
; CHECK-NEXT:    [[TMP37:%.*]] = sext i32 [[TMP36]] to i64
; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP37]]
; CHECK-NEXT:    [[WIDE_LOAD14:%.*]] = load <vscale x 8 x i8>, ptr [[TMP38]], align 1
; CHECK-NEXT:    [[TMP39:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD14]] to <vscale x 8 x i32>
; CHECK-NEXT:    [[TMP40]] = or <vscale x 8 x i32> [[VEC_PHI13]], [[TMP39]]
; CHECK-NEXT:    [[INDEX_NEXT15]] = add nuw i64 [[INDEX12]], [[TMP33]]
; CHECK-NEXT:    [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT15]], [[N_VEC11]]
; CHECK-NEXT:    br i1 [[TMP41]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[TMP42:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> [[TMP40]])
; CHECK-NEXT:    [[CMP_N16:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC11]]
; CHECK-NEXT:    br i1 [[CMP_N16]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
; CHECK:       [[VEC_EPILOG_SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ]
; CHECK-NEXT:    [[BC_MERGE_RDX17:%.*]] = phi i32 [ [[TMP42]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP31]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[ITER_CHECK]] ]
; CHECK-NEXT:    br label %[[LOOP:.*]]
; CHECK:       [[LOOP]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ [[BC_MERGE_RDX17]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[OR:%.*]], %[[LOOP]] ]
; CHECK-NEXT:    [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[OFFSET]], [[IV_TRUNC]]
; CHECK-NEXT:    [[ADD_EXT:%.*]] = sext i32 [[ADD]] to i64
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[ADD_EXT]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT:    [[LOAD_EXT:%.*]] = zext i8 [[LOAD]] to i32
; CHECK-NEXT:    [[OR]] = or i32 [[SUM]], [[LOAD_EXT]]
; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK:       [[EXIT]]:
; CHECK-NEXT:    [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[LOOP]] ], [ [[TMP31]], %[[MIDDLE_BLOCK]] ], [ [[TMP42]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT:    ret i32 [[OR_LCSSA]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %sum = phi i32 [ 0, %entry ], [ %or, %loop ]
  %iv.trunc = trunc i64 %iv to i32
  %add = add i32 %offset, %iv.trunc
  %add.ext = sext i32 %add to i64
  %gep = getelementptr i8, ptr %src, i64 %add.ext
  %load = load i8, ptr %gep, align 1
  %load.ext = zext i8 %load to i32
  %or = or i32 %sum, %load.ext
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv, %n
  br i1 %ec, label %exit, label %loop

exit:
  ret i32 %or
}

attributes #0 = { "target-cpu"="neoverse-512tvb" }
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; CHECK: [[PROF4]] = !{!"branch_weights", i32 8, i32 56}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]}
;.