; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='print<access-info>' -disable-output %s 2>&1 | FileCheck %s

target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

declare void @llvm.assume(i1)
declare void @use(ptr noundef)

; %offset is known positive via assume, so we should be able to detect the
; forward dependence.
define void @offset_i8_known_positive_via_assume_forward_dep_1(ptr %A, i64 %offset, i64 %N) {
; CHECK-LABEL: 'offset_i8_known_positive_via_assume_forward_dep_1'
; CHECK-NEXT:  loop:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %l = load i8, ptr %gep.off, align 4 ->
; CHECK-NEXT:          store i8 %add, ptr %gep, align 4
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
;
entry:
  %c = icmp sgt i64 %offset, 0
  call void @llvm.assume(i1 %c)
  %off = getelementptr inbounds i8, ptr %A, i64 %offset
  call void @use(ptr noundef %off)
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.off = getelementptr inbounds i8, ptr %off, i64 %iv
  %l = load i8, ptr %gep.off, align 4
  %add = add nsw i8 %l, 5
  %gep = getelementptr inbounds i8, ptr %A, i64 %iv
  store i8 %add, ptr %gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %N
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; Same as the previous test, but with i32-typed GEPs and i8 accesses; run-time
; checks are currently emitted instead of a forward dependence.
define void @offset_i32_known_positive_via_assume_forward_dep_1(ptr %A, i64 %offset, i64 %N) {
; CHECK-LABEL: 'offset_i32_known_positive_via_assume_forward_dep_1'
; CHECK-NEXT:  loop:
; CHECK-NEXT:    Memory dependences are safe with run-time checks
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Check 0:
; CHECK-NEXT:      Comparing group GRP0:
; CHECK-NEXT:        %gep = getelementptr inbounds i32, ptr %A, i64 %iv
; CHECK-NEXT:      Against group GRP1:
; CHECK-NEXT:        %gep.off = getelementptr inbounds i32, ptr %off, i64 %iv
; CHECK-NEXT:    Grouped accesses:
; CHECK-NEXT:      Group GRP0:
; CHECK-NEXT:        (Low: %A High: (-3 + (4 * %N) + %A))
; CHECK-NEXT:          Member: {%A,+,4}<%loop>
; CHECK-NEXT:      Group GRP1:
; CHECK-NEXT:        (Low: ((4 * %offset) + %A) High: (-3 + (4 * %offset) + (4 * %N) + %A))
; CHECK-NEXT:          Member: {((4 * %offset) + %A),+,4}<%loop>
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
;
entry:
  %c = icmp sgt i64 %offset, 0
  call void @llvm.assume(i1 %c)
  %off = getelementptr inbounds i32, ptr %A, i64 %offset
  call void @use(ptr noundef %off)
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.off = getelementptr inbounds i32, ptr %off, i64 %iv
  %l = load i8, ptr %gep.off, align 4
  %add = add nsw i8 %l, 5
  %gep = getelementptr inbounds i32, ptr %A, i64 %iv
  store i8 %add, ptr %gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %N
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; %offset is known positive via assume, so we should be able to detect the
; forward dependence.
define void @offset_known_positive_via_assume_forward_dep_2(ptr %A, i64 %offset, i64 %N) {
; CHECK-LABEL: 'offset_known_positive_via_assume_forward_dep_2'
; CHECK-NEXT:  loop:
; CHECK-NEXT:    Memory dependences are safe
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:      Forward:
; CHECK-NEXT:          %l = load i32, ptr %gep.off, align 4 ->
; CHECK-NEXT:          store i32 %add, ptr %gep, align 4
; CHECK-EMPTY:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Grouped accesses:
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
;
entry:
  %c = icmp sgt i64 %offset, 0
  call void @llvm.assume(i1 %c)
  %c.2 = icmp slt i64 %offset, 20
  call void @llvm.assume(i1 %c.2)
  %off = getelementptr inbounds i32, ptr %A, i64 %offset
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.off = getelementptr inbounds i32, ptr %off, i64 %iv
  %l = load i32, ptr %gep.off, align 4
  %add = add nsw i32 %l, 5
  %gep = getelementptr inbounds i32, ptr %A, i64 %iv
  store i32 %add, ptr %gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %N
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; The range of %offset is known via assumes, but it may be positive or negative.
define void @offset_may_be_negative_via_assume_unknown_dep(ptr %A, i64 %offset, i64 %N) {
; CHECK-LABEL: 'offset_may_be_negative_via_assume_unknown_dep'
; CHECK-NEXT:  loop:
; CHECK-NEXT:    Memory dependences are safe with run-time checks
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Check 0:
; CHECK-NEXT:      Comparing group GRP0:
; CHECK-NEXT:        %gep.mul.2 = getelementptr inbounds i32, ptr %A, i64 %iv
; CHECK-NEXT:      Against group GRP1:
; CHECK-NEXT:        %gep = getelementptr inbounds i32, ptr %off, i64 %iv
; CHECK-NEXT:    Grouped accesses:
; CHECK-NEXT:      Group GRP0:
; CHECK-NEXT:        (Low: %A High: ((4 * %N) + %A))
; CHECK-NEXT:          Member: {%A,+,4}<%loop>
; CHECK-NEXT:      Group GRP1:
; CHECK-NEXT:        (Low: ((4 * %offset) + %A) High: ((4 * %offset) + (4 * %N) + %A))
; CHECK-NEXT:          Member: {((4 * %offset) + %A),+,4}<%loop>
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
;
entry:
  %c = icmp sgt i64 %offset, -4
  call void @llvm.assume(i1 %c)
  %c.2 = icmp slt i64 %offset, 20
  call void @llvm.assume(i1 %c.2)
  %off = getelementptr inbounds i32, ptr %A, i64 %offset
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds i32, ptr %off, i64 %iv
  %l = load i32, ptr %gep, align 4
  %add = add nsw i32 %l, 5
  %gep.mul.2 = getelementptr inbounds i32, ptr %A, i64 %iv
  store i32 %add, ptr %gep.mul.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %N
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}

; Same as the tests above, but without any assume on the range of %offset;
; run-time checks are required.
define void @offset_no_assumes(ptr %A, i64 %offset, i64 %N) {
; CHECK-LABEL: 'offset_no_assumes'
; CHECK-NEXT:  loop:
; CHECK-NEXT:    Memory dependences are safe with run-time checks
; CHECK-NEXT:    Dependences:
; CHECK-NEXT:    Run-time memory checks:
; CHECK-NEXT:    Check 0:
; CHECK-NEXT:      Comparing group GRP0:
; CHECK-NEXT:        %gep = getelementptr inbounds i32, ptr %A, i64 %iv
; CHECK-NEXT:      Against group GRP1:
; CHECK-NEXT:        %gep.off = getelementptr inbounds i32, ptr %off, i64 %iv
; CHECK-NEXT:    Grouped accesses:
; CHECK-NEXT:      Group GRP0:
; CHECK-NEXT:        (Low: %A High: ((4 * %N) + %A))
; CHECK-NEXT:          Member: {%A,+,4}<%loop>
; CHECK-NEXT:      Group GRP1:
; CHECK-NEXT:        (Low: ((4 * %offset) + %A) High: ((4 * %offset) + (4 * %N) + %A))
; CHECK-NEXT:          Member: {((4 * %offset) + %A),+,4}<%loop>
; CHECK-EMPTY:
; CHECK-NEXT:    Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT:    SCEV assumptions:
; CHECK-EMPTY:
; CHECK-NEXT:    Expressions re-written:
;
entry:
  %off = getelementptr inbounds i32, ptr %A, i64 %offset
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.off = getelementptr inbounds i32, ptr %off, i64 %iv
  %l = load i32, ptr %gep.off, align 4
  %add = add nsw i32 %l, 5
  %gep = getelementptr inbounds i32, ptr %A, i64 %iv
  store i32 %add, ptr %gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %N
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}