diff options
Diffstat (limited to 'llvm/test/Transforms/IndVarSimplify')
3 files changed, 743 insertions, 13 deletions
diff --git a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll index cb4e07e..9b9bc68 100644 --- a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll +++ b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll @@ -60,8 +60,7 @@ define void @f_sadd_overflow(ptr %a) { ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 2147483645, %[[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 2147483647 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK-NEXT: br i1 true, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] ; CHECK: [[TRAP]]: ; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] ; CHECK-NEXT: unreachable, !nosanitize [[META0]] @@ -150,8 +149,7 @@ define void @f_uadd_overflow(ptr %a) { ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ -6, %[[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], -1 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK-NEXT: br i1 true, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] ; CHECK: [[TRAP]]: ; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] ; CHECK-NEXT: unreachable, !nosanitize [[META0]] @@ -243,10 +241,7 @@ define void @f_ssub_overflow(ptr nocapture %a) { ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ -2147483642, %[[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i8 0, ptr 
[[ARRAYIDX]], align 1 -; CHECK-NEXT: [[TMP0:%.*]] = trunc nsw i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[TMP1:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[TMP0]], i32 1) -; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1 -; CHECK-NEXT: br i1 [[TMP2]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK-NEXT: br i1 true, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] ; CHECK: [[TRAP]]: ; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] ; CHECK-NEXT: unreachable, !nosanitize [[META0]] @@ -339,10 +334,7 @@ define void @f_usub_overflow(ptr nocapture %a) { ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[CONT:.*]] ], [ 15, %[[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDVARS_IV]] ; CHECK-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1 -; CHECK-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[TMP1:%.*]] = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[TMP0]], i32 1) -; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1 -; CHECK-NEXT: br i1 [[TMP2]], label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] +; CHECK-NEXT: br i1 true, label %[[TRAP:.*]], label %[[CONT]], !nosanitize [[META0]] ; CHECK: [[TRAP]]: ; CHECK-NEXT: tail call void @llvm.trap(), !nosanitize [[META0]] ; CHECK-NEXT: unreachable, !nosanitize [[META0]] diff --git a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll index 89b132e..9371fe2 100644 --- a/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll +++ b/llvm/test/Transforms/IndVarSimplify/pointer-loop-guards.ll @@ -18,7 +18,7 @@ define i64 @test_ptr_compare_guard(ptr %start, ptr %end) { ; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[EXIT_LOOPEXIT:.*]] ; CHECK: [[LOOP_LATCH]]: ; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1 -; CHECK-NEXT: 
[[I64_IV_NEXT]] = add i64 [[I64_IV]], 1 +; CHECK-NEXT: [[I64_IV_NEXT]] = add nuw i64 [[I64_IV]], 1 ; CHECK-NEXT: [[C_2:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]] ; CHECK-NEXT: br i1 [[C_2]], label %[[EXIT_LOOPEXIT]], label %[[LOOP_HEADER]] ; CHECK: [[EXIT_LOOPEXIT]]: diff --git a/llvm/test/Transforms/IndVarSimplify/unreachable-exit.ll b/llvm/test/Transforms/IndVarSimplify/unreachable-exit.ll new file mode 100644 index 0000000..b9c9228 --- /dev/null +++ b/llvm/test/Transforms/IndVarSimplify/unreachable-exit.ll @@ -0,0 +1,738 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=indvars < %s | FileCheck %s + +define void @optimize_trap(i32 %block_size) { +; CHECK-LABEL: define void @optimize_trap( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[BLOCK_SIZE]], -1 +; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 3) +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 3, [[UMIN]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: br i1 [[TMP2]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @llvm.trap() +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @llvm.trap() + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_atomic(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_atomic( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = 
alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @llvm.trap() +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store atomic i8 [[TMP4]], ptr [[ARRAYIDX7]] unordered, align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = 
%for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @llvm.trap() + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store atomic i8 %1, ptr %arrayidx7 unordered, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_volatile(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_volatile( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @llvm.trap() +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; 
CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store volatile i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @llvm.trap() + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store volatile i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_call(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_call( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: 
[[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @llvm.trap() +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: call void @x(ptr null) +; CHECK-NEXT: store volatile i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = 
%if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @llvm.trap() + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + call void @x(ptr null) + store volatile i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @optimize_ubsan_trap(i32 %block_size) { +; CHECK-LABEL: define void @optimize_ubsan_trap( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[BLOCK_SIZE]], -1 +; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 3) +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 3, [[UMIN]] +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, 
%[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: br i1 [[TMP2]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @llvm.ubsantrap(i8 1) +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @llvm.ubsantrap(i8 1) + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, 
label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_arbitrary_call(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_arbitrary_call( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @noreturn_with_argmem(ptr [[FOO_ARR]]) +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP4:%.*]] = xor i8 [[TMP3]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store i8 [[TMP4]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + 
%cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: ; preds = %for.body.preheader, %if.end4 + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @noreturn_with_argmem(ptr %foo_arr) + unreachable + +if.end4: ; preds = %for.body + %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_two_exits(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_two_exits( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 
0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[P:%.*]] = call i1 @pred() +; CHECK-NEXT: br i1 [[P]], label %[[FOR_BODY_CONT:.*]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; CHECK: [[FOR_BODY_CONT]]: +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @noreturn(ptr [[FOO_ARR]]) +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_015]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]] +; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; +entry: + %foo_arr = alloca [2 x i8], align 16 + %bar_arr = alloca [2 x i8], align 16 + call void @x(ptr nonnull %foo_arr) + %cmp14.not = icmp eq i32 %block_size, 0 + br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.cond.cleanup.loopexit: ; preds = %if.end4 + br label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry + call void @x(ptr nonnull %bar_arr) + ret void + +for.body: + %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ] + %p = call i1 @pred() + br i1 %p, label %for.body.cont, label %for.cond.cleanup.loopexit + +for.body.cont: ; preds = %for.body.preheader, %if.end4 + %cmp1 = icmp samesign ugt i32 %i.015, 2 + br i1 %cmp1, label %if.then, label %if.end4 + +if.then: ; preds = %for.body + call void @noreturn(ptr %foo_arr) + unreachable + +if.end4: ; preds = %for.body + %arrayidx 
= getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015 + %0 = load i8, ptr %arrayidx, align 1 + %1 = xor i8 %0, 54 + %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015 + store i8 %1, ptr %arrayidx7, align 1 + %inc = add nuw nsw i32 %i.015, 1 + %cmp = icmp ult i32 %inc, %block_size + br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit +} + +define void @no_optimize_two_exits2(i32 %block_size) { +; CHECK-LABEL: define void @no_optimize_two_exits2( +; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[FOO_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: [[BAR_ARR:%.*]] = alloca [2 x i8], align 16 +; CHECK-NEXT: call void @x(ptr nonnull [[FOO_ARR]]) +; CHECK-NEXT: [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0 +; CHECK-NEXT: br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]] +; CHECK: [[FOR_BODY_PREHEADER]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]] +; CHECK: [[FOR_COND_CLEANUP]]: +; CHECK-NEXT: call void @x(ptr nonnull [[BAR_ARR]]) +; CHECK-NEXT: ret void +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2 +; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_BODY_CONT:.*]] +; CHECK: [[FOR_BODY_CONT]]: +; CHECK-NEXT: [[P:%.*]] = call i1 @pred() +; CHECK-NEXT: br i1 [[P]], label %[[IF_END4]], label %[[FOR_COND_CLEANUP_LOOPEXIT]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: call void @noreturn(ptr [[FOO_ARR]]) +; CHECK-NEXT: unreachable +; CHECK: [[IF_END4]]: +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]] +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], 54 +; CHECK-NEXT: 
[[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
+; CHECK-NEXT:    store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_015]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
+;
+entry:
+  %foo_arr = alloca [2 x i8], align 16
+  %bar_arr = alloca [2 x i8], align 16
+  call void @x(ptr nonnull %foo_arr)
+  %cmp14.not = icmp eq i32 %block_size, 0
+  br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %if.end4
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  call void @x(ptr nonnull %bar_arr)
+  ret void
+
+for.body:
+  %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
+  %cmp1 = icmp samesign ugt i32 %i.015, 2
+  br i1 %cmp1, label %if.then, label %for.body.cont
+
+for.body.cont:                                    ; preds = %for.body.preheader, %if.end4
+  %p = call i1 @pred()
+  br i1 %p, label %if.end4, label %for.cond.cleanup.loopexit
+
+if.then:                                          ; preds = %for.body
+  call void @noreturn(ptr %foo_arr)
+  unreachable
+
+if.end4:                                          ; preds = %for.body
+  %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
+  %0 = load i8, ptr %arrayidx, align 1
+  %1 = xor i8 %0, 54
+  %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
+  store i8 %1, ptr %arrayidx7, align 1
+  %inc = add nuw nsw i32 %i.015, 1
+  %cmp = icmp ult i32 %inc, %block_size
+  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
+}
+
+define void @no_optimize_depdendent_ubsan_trap(i32 %block_size) {
+; CHECK-LABEL: define void @no_optimize_depdendent_ubsan_trap(
+; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
+; CHECK-NEXT:    [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
+; CHECK-NEXT:    call void @x(ptr nonnull [[FOO_ARR]])
+; CHECK-NEXT:    [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
+; CHECK-NEXT:    br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
+; CHECK:       [[FOR_BODY_PREHEADER]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
+; CHECK-NEXT:    br label %[[FOR_COND_CLEANUP]]
+; CHECK:       [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT:    call void @x(ptr nonnull [[BAR_ARR]])
+; CHECK-NEXT:    ret void
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
+; CHECK-NEXT:    br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
+; CHECK:       [[IF_THEN]]:
+; CHECK-NEXT:    [[I_015_LCSSA:%.*]] = phi i32 [ [[I_015]], %[[FOR_BODY]] ]
+; CHECK-NEXT:    call void @noreturn_with_i32(i32 [[I_015_LCSSA]])
+; CHECK-NEXT:    unreachable
+; CHECK:       [[IF_END4]]:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[TMP0]], 54
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
+; CHECK-NEXT:    store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_015]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
+;
+entry:
+  %foo_arr = alloca [2 x i8], align 16
+  %bar_arr = alloca [2 x i8], align 16
+  call void @x(ptr nonnull %foo_arr)
+  %cmp14.not = icmp eq i32 %block_size, 0
+  br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %if.end4
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  call void @x(ptr nonnull %bar_arr)
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %if.end4
+  %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
+  %cmp1 = icmp samesign ugt i32 %i.015, 2
+  br i1 %cmp1, label %if.then, label %if.end4
+
+if.then:                                          ; preds = %for.body
+  call void @noreturn_with_i32(i32 %i.015)
+  unreachable
+
+if.end4:                                          ; preds = %for.body
+  %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
+  %0 = load i8, ptr %arrayidx, align 1
+  %1 = xor i8 %0, 54
+  %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
+  store i8 %1, ptr %arrayidx7, align 1
+  %inc = add nuw nsw i32 %i.015, 1
+  %cmp = icmp ult i32 %inc, %block_size
+  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
+}
+
+define void @no_optimize_depdendent_load_trap(i32 %block_size) {
+; CHECK-LABEL: define void @no_optimize_depdendent_load_trap(
+; CHECK-SAME: i32 [[BLOCK_SIZE:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[FOO_ARR:%.*]] = alloca [2 x i8], align 16
+; CHECK-NEXT:    [[BAR_ARR:%.*]] = alloca [2 x i8], align 16
+; CHECK-NEXT:    call void @x(ptr nonnull [[FOO_ARR]])
+; CHECK-NEXT:    [[CMP14_NOT:%.*]] = icmp eq i32 [[BLOCK_SIZE]], 0
+; CHECK-NEXT:    br i1 [[CMP14_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
+; CHECK:       [[FOR_BODY_PREHEADER]]:
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
+; CHECK-NEXT:    br label %[[FOR_COND_CLEANUP]]
+; CHECK:       [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT:    call void @x(ptr nonnull [[BAR_ARR]])
+; CHECK-NEXT:    ret void
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[I_015:%.*]] = phi i32 [ [[INC:%.*]], %[[IF_END4:.*]] ], [ 0, %[[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp samesign ugt i32 [[I_015]], 2
+; CHECK-NEXT:    br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[IF_END4]]
+; CHECK:       [[IF_THEN]]:
+; CHECK-NEXT:    [[I_015_LCSSA:%.*]] = load i8, ptr [[FOO_ARR]], align 1
+; CHECK-NEXT:    call void @noreturn_with_i8(i8 [[I_015_LCSSA]])
+; CHECK-NEXT:    unreachable
+; CHECK:       [[IF_END4]]:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1024 x i8], ptr [[FOO_ARR]], i64 0, i32 [[I_015]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[TMP0]], 54
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw [1025 x i8], ptr [[BAR_ARR]], i64 0, i32 [[I_015]]
+; CHECK-NEXT:    store i8 [[TMP1]], ptr [[ARRAYIDX7]], align 1
+; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_015]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[INC]], [[BLOCK_SIZE]]
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_COND_CLEANUP_LOOPEXIT]]
+;
+entry:
+  %foo_arr = alloca [2 x i8], align 16
+  %bar_arr = alloca [2 x i8], align 16
+  call void @x(ptr nonnull %foo_arr)
+  %cmp14.not = icmp eq i32 %block_size, 0
+  br i1 %cmp14.not, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %if.end4
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  call void @x(ptr nonnull %bar_arr)
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %if.end4
+  %i.015 = phi i32 [ %inc, %if.end4 ], [ 0, %for.body.preheader ]
+  %cmp1 = icmp samesign ugt i32 %i.015, 2
+  br i1 %cmp1, label %if.then, label %if.end4
+
+if.then:                                          ; preds = %for.body
+  %r = load i8, ptr %foo_arr, align 1
+  call void @noreturn_with_i8(i8 %r)
+  unreachable
+
+if.end4:                                          ; preds = %for.body
+  %arrayidx = getelementptr inbounds nuw [1024 x i8], ptr %foo_arr, i64 0, i32 %i.015
+  %0 = load i8, ptr %arrayidx, align 1
+  %1 = xor i8 %0, 54
+  %arrayidx7 = getelementptr inbounds nuw [1025 x i8], ptr %bar_arr, i64 0, i32 %i.015
+  store i8 %1, ptr %arrayidx7, align 1
+  %inc = add nuw nsw i32 %i.015, 1
+  %cmp = icmp ult i32 %inc, %block_size
+  br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
+}
+
+
+declare void @x(ptr noundef) local_unnamed_addr
+declare i1 @pred() local_unnamed_addr
+
+declare void @llvm.trap() #0
+declare void @noreturn(ptr) #0
+declare void @noreturn_with_i32(i32) #0
+declare void @noreturn_with_i8(i8) #0
+declare void @noreturn_with_argmem(ptr) #1
+
+attributes #0 = { cold noreturn nounwind memory(inaccessiblemem: write) }
+attributes #1 = { cold noreturn nounwind memory(argmem: read) }