| author | Andrei Elovikov <andrei.elovikov@sifive.com> | 2026-02-20 11:33:45 -0800 |
|---|---|---|
| committer | Andrei Elovikov <andrei.elovikov@sifive.com> | 2026-03-06 10:06:12 -0800 |
| commit | 5c15036066b24f1e271ba5d7fee6a0fd8ef830a0 (patch) | |
| tree | 251a36a1b74afc3d08dc149f4d7765db9a73c2b2 | |
| parent | c5dff26233e78184dfb3fa0dbe732cbaa2d45c16 (diff) | |
| download | llvm-users/eas/vplan-based-stride-mv-tests.tar.gz llvm-users/eas/vplan-based-stride-mv-tests.tar.bz2 llvm-users/eas/vplan-based-stride-mv-tests.zip | |
[NFC][VPlan] Add initial tests for future VPlan-based stride MV
I tried to include both the features that the current
LoopAccessAnalysis-based transformation supports (e.g., trunc/sext of
the stride) and cases where the current implementation behaves poorly
(e.g., https://godbolt.org/z/h31c3zKxK), as well as some other
potentially interesting scenarios I could imagine.
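For illustration, here is a minimal hand-written sketch of the "sext of the stride" shape mentioned above; the function and value names are mine, not taken from the tests. The stride is only known as an `i32`, and the stride speculation has to look through the cast to recognize the symbolic stride behind it.

```llvm
; Hedged sketch, not copied from the tests: a strided load whose i64 index
; is built from a sign-extended i32 stride.
define void @sext_stride_sketch(ptr noalias %p.out, ptr %p, i32 %stride) {
entry:
  br label %header

header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
  %iv.next = add nsw i64 %iv, 1
  %stride.ext = sext i32 %stride to i64   ; the cast the analysis must look through
  %idx = mul i64 %iv, %stride.ext
  %gep.ld = getelementptr i64, ptr %p, i64 %idx
  %ld = load i64, ptr %gep.ld, align 8
  %gep.st = getelementptr i64, ptr %p.out, i64 %iv
  store i64 %ld, ptr %gep.st, align 8
  %exitcond = icmp slt i64 %iv.next, 128
  br i1 %exitcond, label %header, label %exit

exit:
  ret void
}
```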
There are two test files with the same content. One is for the VPlan dump change
of the future transformation alone (I'll update `-vplan-print-after` in the next
PR); the other is for the full vectorizer pipeline. The latter has two `RUN:`
lines:
* No multiversioning, so the next PR's diff can show the transformation itself.
* Stride multiversioning performed in LAA, so that we can compare the future
VPlan-based transformation against the old behavior.
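To make the comparison between those two `RUN:` lines concrete, here is a minimal hand-written sketch of what stride multiversioning conceptually does to the `@basic` loop below when it speculates `%stride == 1`: a runtime check selects between a vectorized unit-stride version and the original scalar loop. Block and value names are illustrative, and neither the LAA-based nor the future VPlan-based implementation is guaranteed to emit exactly this.

```llvm
; Hedged sketch of the multiversioned form of @basic, assuming VF=4 and a
; trip count of 128 (so no remainder loop is needed).
define void @basic_versioned_sketch(ptr noalias %p.out, ptr %p, i64 %stride) {
entry:
  ; Runtime stride check: fall back to the scalar loop unless %stride == 1.
  %stride.not.one = icmp ne i64 %stride, 1
  br i1 %stride.not.one, label %scalar.header, label %vector.body

vector.body:
  ; With %stride == 1, %iv * %stride == %iv, so the strided load becomes a
  ; contiguous vector load instead of a gather.
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %gep.ld = getelementptr i64, ptr %p, i64 %index
  %wide.ld = load <4 x i64>, ptr %gep.ld, align 8
  %gep.st = getelementptr i64, ptr %p.out, i64 %index
  store <4 x i64> %wide.ld, ptr %gep.st, align 8
  %index.next = add nuw i64 %index, 4
  %done = icmp eq i64 %index.next, 128
  br i1 %done, label %exit, label %vector.body

scalar.header:
  ; Original scalar loop, executed for any other stride.
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %scalar.header ]
  %iv.next = add nsw i64 %iv, 1
  %idx = mul i64 %iv, %stride
  %gep.ld.s = getelementptr i64, ptr %p, i64 %idx
  %ld = load i64, ptr %gep.ld.s, align 8
  %gep.st.s = getelementptr i64, ptr %p.out, i64 %iv
  store i64 %ld, ptr %gep.st.s, align 8
  %exitcond = icmp slt i64 %iv.next, 128
  br i1 %exitcond, label %scalar.header, label %exit

exit:
  ret void
}
```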
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll | 3421 |
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll | 4730 |
2 files changed, 8151 insertions, 0 deletions
diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll new file mode 100644 index 000000000000..62fe6629916f --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll @@ -0,0 +1,3421 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -disable-output \ +; RUN: -vplan-print-after=scalarizeMemOpsWithIrregularTypes \ +; RUN: -enable-mem-access-versioning=false 2>&1 | FileCheck %s + +define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'basic' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, 
align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { +; CHECK-LABEL: VPlan for loop in 'basic_optsize' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +attributes #0 = { optsize } + +define void @basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { +; CHECK-LABEL: VPlan for loop in 'basic_minsize' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; 
CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +attributes #1 = { minsize } + +; When using byte-gep with wide memop unit-stride wouldn't be one at the geps +; index but rather mem-access-type-size. This test has constant multiplier equal +; to that exact value `8 * %stride` so that `%stride == 1` would result in +; unit-strided load. 
+define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_scaled_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<8> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 8 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8 +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 8 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Similar to above but constant multiplier is smaller than load type's width, so +; unit-strideness would require `%stride == 2`. 
+define void @byte_gep_under_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_under_scaled_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<4> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 4 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8 +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 4 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Another variation for constant multiplier with byte gep. This time the +; multiplier is bigger than load access type so this cannot be speculated for +; unit-strideness. 
+define void @byte_gep_over_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_over_scaled_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<16> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 16 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8 +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 16 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; And another one, with multiplier non-power-of-two. Cannot be speculated for +; unit-strideness. 
+define void @byte_gep_non_power_of_two_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_non_power_of_two_scaled_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<11> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride.x8 = mul i64 %stride, 11 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.x8 +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 11 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; No constant multiplier, need to speculate `%stride == sizeof(load-access-type)`. 
+define void @byte_gep_nonscaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_nonscaled_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; If we negate %stride before indexing, this might be a good heuristic to avoid +; stride speculation... At the very least, keep this test for the documentation +; purposes. 
+define void @byte_gep_negated_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_gep_negated_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride.neg> = sub ir<0>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.neg> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride.neg = sub i64 0, %stride +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride.neg +; CHECK-NEXT: IR %gep.ld = getelementptr i8, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.neg = sub i64 0, %stride + %idx = mul i64 %iv, %stride.neg + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Two memory accesses can be speculated for unit-strideness by single predicate. 
+define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'shared_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx +; CHECK-NEXT: IR %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %ld1 = load i64, ptr %gep.ld1, align 8 +; CHECK-NEXT: IR %val = add i64 %ld0, %ld1 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx + %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx + %ld0 = load i64, ptr %gep.ld0, align 8 + %ld1 = load i64, ptr %gep.ld1, align 8 + %val = add i64 %ld0, %ld1 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret 
void +} + +; Speculating one access for unit-strideness guarantees that the other one isn't. +define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'dependent_strides' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride1> = add ir<%stride>, ir<1> +; CHECK-NEXT: EMIT ir<%idx0> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx1> = mul ir<%iv>, ir<%stride1> +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx0> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx1> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride1 = add i64 %stride, 1 +; CHECK-NEXT: IR %idx0 = mul i64 %iv, %stride +; CHECK-NEXT: IR %idx1 = mul i64 %iv, %stride1 +; CHECK-NEXT: IR %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx0 +; CHECK-NEXT: IR %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1 +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %ld1 = load i64, ptr %gep.ld1, align 8 +; CHECK-NEXT: IR %val = add i64 %ld0, %ld1 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride1 = add i64 %stride, 1 + %idx0 = mul i64 %iv, %stride + %idx1 = mul i64 %iv, %stride1 + + %gep.ld0 = 
getelementptr i64, ptr %p0, i64 %idx0 + %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1 + %ld0 = load i64, ptr %gep.ld0, align 8 + %ld1 = load i64, ptr %gep.ld1, align 8 + %val = add i64 %ld0, %ld1 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Same as above but in different order. Might be reasonable to have some +; heuristic to choose one over another instead of just speculating the first +; access. +define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'dependent_strides_reverse_order' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%stride0> = add ir<%stride>, ir<1> +; CHECK-NEXT: EMIT ir<%idx0> = mul ir<%iv>, ir<%stride0> +; CHECK-NEXT: EMIT ir<%idx1> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx0> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx1> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %stride0 = add i64 %stride, 1 +; CHECK-NEXT: IR %idx0 = mul i64 %iv, %stride0 +; CHECK-NEXT: IR %idx1 = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx0 +; CHECK-NEXT: IR %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1 +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %ld1 = load i64, ptr %gep.ld1, align 8 +; CHECK-NEXT: 
IR %val = add i64 %ld0, %ld1 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride0 = add i64 %stride, 1 + %idx0 = mul i64 %iv, %stride0 + %idx1 = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx0 + %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1 + %ld0 = load i64, ptr %gep.ld0, align 8 + %ld1 = load i64, ptr %gep.ld1, align 8 + %val = add i64 %ld0, %ld1 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Two dependent accesses again, but instead of strides being dependent on each +; other, we have the same stride but different access sizes. +define void @byte_dependent_byte_geps(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_dependent_byte_geps' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1.ext> = sext ir<%ld1> to i64 +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1.ext> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; 
CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx +; CHECK-NEXT: IR %ld1 = load i32, ptr %gep.ld1, align 8 +; CHECK-NEXT: IR %ld1.ext = sext i32 %ld1 to i64 +; CHECK-NEXT: IR %val = add i64 %ld0, %ld1.ext +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx + %ld0 = load i64, ptr %gep.ld0, align 8 + + %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx + %ld1 = load i32, ptr %gep.ld1, align 8 + %ld1.ext = sext i32 %ld1 to i64 + + %val = add i64 %ld0, %ld1.ext + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Likewise but reverse order of accesses. +define void @byte_dependent_byte_geps_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'byte_dependent_byte_geps_reverse_order' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1.ext> = sext ir<%ld1> to i64 +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1.ext> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] 
+; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx +; CHECK-NEXT: IR %ld1 = load i32, ptr %gep.ld1, align 8 +; CHECK-NEXT: IR %ld1.ext = sext i32 %ld1 to i64 +; CHECK-NEXT: IR %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %val = add i64 %ld0, %ld1.ext +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx + %ld1 = load i32, ptr %gep.ld1, align 8 + %ld1.ext = sext i32 %ld1 to i64 + + %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx + %ld0 = load i64, ptr %gep.ld0, align 8 + + %val = add i64 %ld0, %ld1.ext + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; Interleave group with non-constant stride. Probabably doesn't make sense to +; speculate the stride here (as a heuristic). +define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'strided_interleave' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%gep.ld0>, ir<1> +; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, 
scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld0 = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %gep.ld1 = getelementptr i64, ptr %gep.ld0, i61 1 +; CHECK-NEXT: IR %ld0 = load i64, ptr %gep.ld0, align 8 +; CHECK-NEXT: IR %ld1 = load i64, ptr %gep.ld1, align 8 +; CHECK-NEXT: IR %val = add i64 %ld0, %ld1 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i64, ptr %p, i64 %idx + %gep.ld1 = getelementptr i64, ptr %gep.ld0, i61 1 + %ld0 = load i64, ptr %gep.ld0, align 8 + %ld1 = load i64, ptr %gep.ld1, align 8 + %val = add i64 %ld0, %ld1 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; Mem access ptr in the form of `(%base,+,%stride)<%header>` where %base is +; something present as in instruction in the loop body (although that would be a +; SCEV expression, not SCEVUnknown). 
+define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) { +; CHECK-LABEL: VPlan for loop in 'in_loop_base' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = add ir<%mul>, ir<%offset> +; CHECK-NEXT: EMIT ir<%gep.ld.base> = getelementptr ir<%p>, ir<%offset> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%gep.ld.base>, ir<%mul> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %mul = mul i64 %iv, %stride +; CHECK-NEXT: IR %idx = add i64 %mul, %offset +; CHECK-NEXT: IR %gep.ld.base = getelementptr i64, ptr %p, i64 %offset +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %gep.ld.base, i64 %mul +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %offset + + %gep.ld.base = getelementptr i64,ptr %p, i64 %offset + %gep.ld = getelementptr i64, ptr %gep.ld.base, i64 %mul + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Base (non-recurrent) part of the memory access pointer doesn't have +; corresponding IR value, e.g., 
`(base + %iv*%stride) + %offset`. +define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) { +; CHECK-LABEL: VPlan for loop in 'base_not_in_ir' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = add ir<%mul>, ir<%offset> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %mul = mul i64 %iv, %stride +; CHECK-NEXT: IR %idx = add i64 %mul, %offset +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %offset + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; If the base pointer is uniform but not loop-invariant we could still speculate +; the access to be unit-strided, although this is not implemented yet.
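+; Rough C equivalent of @non_invariant_uniform_base below (illustrative sketch;
+; hypothetical names):
+;   for (int64_t i = 0; i < 128; ++i)
+;     p_out[i] = p[i * stride + i / 32];
+; Under a speculated stride == 1 the index is i + i/32, which stays unit-strided
+; within each 32-iteration block even though the i/32 base part keeps changing.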
+define void @non_invariant_uniform_base(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'non_invariant_uniform_base' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%iv.sdiv32> = sdiv ir<%iv>, ir<32> +; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = add ir<%mul>, ir<%iv.sdiv32> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %iv.sdiv32 = sdiv i64 %iv, 32 +; CHECK-NEXT: IR %mul = mul i64 %iv, %stride +; CHECK-NEXT: IR %idx = add i64 %mul, %iv.sdiv32 +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %iv.sdiv32 = sdiv i64 %iv, 32 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %iv.sdiv32 + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @non_invariant_uniform_stride(ptr noalias %p.out, ptr %p, ptr %p.uni) { +; CHECK-LABEL: VPlan for loop in 'non_invariant_uniform_stride' +; CHECK: VPlan ' for 
UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%iv.sdiv32> = sdiv ir<%iv>, ir<32> +; CHECK-NEXT: EMIT ir<%gep.uni> = getelementptr ir<%p.uni>, ir<%iv.sdiv32> +; CHECK-NEXT: EMIT-SCALAR ir<%stride> = load ir<%gep.uni> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %iv.sdiv32 = sdiv i64 %iv, 32 +; CHECK-NEXT: IR %gep.uni = getelementptr i64, ptr %p.uni, i64 %iv.sdiv32 +; CHECK-NEXT: IR %stride = load i64, ptr %gep.uni, align 4 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %iv.sdiv32 = sdiv i64 %iv, 32 + + %gep.uni = getelementptr i64, ptr %p.uni, i64 %iv.sdiv32 + %stride = load i64, ptr %gep.uni + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Not valuable by itself, but as a basis for the subsequent test to ensure that +; non-constant trip count doesn't 
change anything by itself. Also show the order +; of checks between stride speculation and trip-count check when not +; tail-folding. +define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { +; CHECK-LABEL: VPlan for loop in 'non_constant_btc' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax %n) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %n +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC == stride, so stride speculation would result in zero vector loop +; iterations. 
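+; Rough C shape of @stride_as_btc below (illustrative sketch; hypothetical
+; names); note the bottom-tested form, giving a trip count of smax(1, stride):
+;   int64_t i = 0;
+;   do { p_out[i] = p[i * stride]; ++i; } while (i < stride);
+; Under a speculated stride == 1 the loop runs exactly once, so a VF=4 vector
+; loop would get zero iterations.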
+define void @stride_as_btc(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'stride_as_btc' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax %stride) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%stride> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %stride +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %stride + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Similar to above but a slightly more complex dependency between stride and +; BTC. 
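+; Rough C shape of @stride_dependent_btc below (illustrative sketch;
+; hypothetical names):
+;   int64_t n = stride + 1, i = 0;
+;   do { p_out[i] = p[i * stride]; ++i; } while (i < n);
+; Under a speculated stride == 1 the trip count is 2, still below VF=4.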
+define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'stride_dependent_btc' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %n = add i64 %stride, 1 +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (1 + %stride)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %n +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + %n = add i64 %stride, 1 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC dependent on %stride, but stride speculation doesn't necessarily mean no +; vector loop iterations. The test shows in which order we emit the run time +; checks for both. 
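+; Rough C shape of @stride_btc_checks_order below (illustrative sketch;
+; hypothetical names):
+;   int64_t n = m * stride, i = 0;
+;   do { p_out[i] = p[i * stride]; ++i; } while (i < n);
+; Under a speculated stride == 1 the trip count is smax(1, m), so the vector
+; loop may still run; both the stride check and the trip-count check remain
+; meaningful.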
+define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i64 %m) { +; CHECK-LABEL: VPlan for loop in 'stride_btc_checks_order' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %n = mul i64 %m, %stride +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (%stride * %m)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %n +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + %n = mul i64 %m, %stride + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC fully defined by stride speculation but still allows vector loop execution. 
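+; Rough C shape of @stride_dependent_btc_non_preventive below (illustrative
+; sketch; hypothetical names):
+;   int64_t n = stride + 3, i = 0;
+;   do { p_out[i] = p[i * stride]; ++i; } while (i < n);
+; Under a speculated stride == 1 the trip count is 4, i.e. exactly one VF=4
+; vector iteration.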
+define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'stride_dependent_btc_non_preventive' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %n = add i64 %stride, 3 +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (3 + %stride)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %n +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Doesn't pass legality as the run-time memory dependence check doesn't support +; strided accesses. If it did, the purpose of this test would be to show how all +; three run-time checks (mem deps/stride speculation/trip-count) would be +; ordered with respect to each other.
Note that the pointer aliasing check could +; potentially be simplified if done after stride speculation. However, that +; isn't necessarily the best idea because we could also multi-version for stride +; and keep the aliasing part generic and shared by both vector loops. +define void @stride_btc_memdep_triple_check(ptr %p, i64 %stride, i64 %out.offset) { +entry: + %p.out = getelementptr i8, ptr %p, i64 %out.offset + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Same as above, but the memdep check doesn't depend on the stride +define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, i64 %stride, i64 %out.offset) { +; CHECK-LABEL: VPlan for loop in 'stride_btc_independent_memdep_triple_check' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %p.out = getelementptr i8, ptr %p2, i64 %out.offset +; CHECK-NEXT: IR %n = add i64 %stride, 3 +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (3 + %stride)) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.ld2> = getelementptr ir<%p2>, ir<%iv> +; CHECK-NEXT: EMIT-SCALAR ir<%ld2> = load ir<%gep.ld2> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld>, ir<%ld2> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from
scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.ld2 = getelementptr i64, ptr %p2, i64 %iv +; CHECK-NEXT: IR %ld2 = load i64, ptr %gep.ld2, align 8 +; CHECK-NEXT: IR %val = add i64 %ld, %ld2 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, %n +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + %p.out = getelementptr i8, ptr %p2, i64 %out.offset + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.ld2 = getelementptr i64, ptr %p2, i64 %iv + %ld2 = load i64, ptr %gep.ld2, align 8 + + %val = add i64 %ld, %ld2 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @actual_stride_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'actual_stride_not_in_ir' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%base> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%base>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR 
%iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %base = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %base, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %base = getelementptr i64, ptr %p, i64 %idx + %gep.ld = getelementptr i64, ptr %base, i64 %idx + + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Gep into multi-dimensional array. Strided last index can be speculated to +; result in a unit-strided memory access. +define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'nd_array_last_idx' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<1>, ir<42>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 42, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, 
align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 42, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; Strided inner index will never result in unit-strided memory-access, even if +; its stride is one. +define void @nd_array_non_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'nd_array_non_last_idx' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<1>, ir<%idx>, ir<42> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 42 +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi 
i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 42 + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Isn't unit-strided either. +define void @nd_array_multiple_idxs(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'nd_array_multiple_idxs' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<1>, ir<%idx>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 
%iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Stride is used through `sext` in the loop. +define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { +; CHECK-LABEL: VPlan for loop in 'sext_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT-SCALAR ir<%stride> = sext ir<%stride.i32> to i64 +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %stride = sext i32 %stride.i32 to i64 +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %stride = sext i32 %stride.i32 to i64 + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Stride is used through `trunc` in the loop. 
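+; Rough C equivalent of @trunc_stride below (illustrative sketch; hypothetical
+; names): the stride is narrowed first, so all index math happens in i32:
+;   int32_t s = (int32_t)stride_i64;
+;   for (int32_t i = 0; i < 128; ++i)
+;     p_out[i] = p[i * s];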
+define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { +; CHECK-LABEL: VPlan for loop in 'trunc_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT-SCALAR ir<%stride> = trunc ir<%stride.i64> to i32 +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %stride = trunc i64 %stride.i64 to i32 +; CHECK-NEXT: IR %iv.next = add nsw i32 %iv, 1 +; CHECK-NEXT: IR %idx = mul i32 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i32, ptr %p, i32 %idx +; CHECK-NEXT: IR %ld = load i32, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i32, ptr %p.out, i32 %iv +; CHECK-NEXT: IR store i32 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i32 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ] + %stride = trunc i64 %stride.i64 to i32 + %iv.next = add nsw i32 %iv, 1 + + %idx = mul i32 %iv, %stride + + %gep.ld = getelementptr i32, ptr %p, i32 %idx + %ld = load i32, ptr %gep.ld, align 8 + + %gep.st = getelementptr i32, ptr %p.out, i32 %iv + store i32 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i32 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; %stride is used through both `trunc`/`sext` for different accesses.
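+; Rough C equivalent of @trunc_ext_stride below (illustrative sketch;
+; hypothetical names; the i16 multiply wraps in the IR, unlike promoted C):
+;   int16_t s16 = (int16_t)stride;
+;   int64_t s64 = (int64_t)stride;
+;   for (int32_t i = 0; i < 128; ++i)
+;     p_out[i] = p0[(int16_t)i * s16] + p0[(int64_t)i * s64];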
+define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) { +; CHECK-LABEL: VPlan for loop in 'trunc_ext_stride' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %stride.trunc = trunc i32 %stride to i16 +; CHECK-NEXT: IR %stride.ext = sext i32 %stride to i64 +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT-SCALAR ir<%iv.trunc> = trunc ir<%iv> to i16 +; CHECK-NEXT: EMIT-SCALAR ir<%iv.ext> = sext ir<%iv> to i64 +; CHECK-NEXT: EMIT ir<%idx.trunc> = mul ir<%iv.trunc>, ir<%stride.trunc> +; CHECK-NEXT: EMIT ir<%idx.ext> = mul ir<%iv.ext>, ir<%stride.ext> +; CHECK-NEXT: EMIT ir<%gep.trunc> = getelementptr ir<%p0>, ir<%idx.trunc> +; CHECK-NEXT: EMIT ir<%gep.ext> = getelementptr ir<%p0>, ir<%idx.ext> +; CHECK-NEXT: EMIT-SCALAR ir<%ld.trunc> = load ir<%gep.trunc> +; CHECK-NEXT: EMIT-SCALAR ir<%ld.ext> = load ir<%gep.ext> +; CHECK-NEXT: EMIT ir<%val> = add ir<%ld.trunc>, ir<%ld.ext> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i32 %iv, 1 +; CHECK-NEXT: IR %iv.trunc = trunc i32 %iv to i16 +; CHECK-NEXT: IR %iv.ext = sext i32 %iv to i64 +; CHECK-NEXT: IR %idx.trunc = mul i16 %iv.trunc, %stride.trunc +; CHECK-NEXT: IR %idx.ext = mul i64 %iv.ext, %stride.ext +; CHECK-NEXT: IR %gep.trunc = getelementptr i32, ptr %p0, i16 %idx.trunc +; CHECK-NEXT: IR %gep.ext = getelementptr i32, ptr %p0, i64 %idx.ext +; CHECK-NEXT: IR %ld.trunc = load i32, ptr %gep.trunc, align 4 +; CHECK-NEXT: IR %ld.ext = load i32, ptr %gep.ext, align 4 +; CHECK-NEXT: IR %val = add i32 %ld.trunc, %ld.ext +; CHECK-NEXT: IR %gep.st = getelementptr i32, ptr %p.out, i32 %iv +; CHECK-NEXT: IR store i32 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i32 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; 
+entry: + %stride.trunc = trunc i32 %stride to i16 + %stride.ext = sext i32 %stride to i64 + br label %header + +header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i32 %iv, 1 + + %iv.trunc = trunc i32 %iv to i16 + %iv.ext = sext i32 %iv to i64 + + %idx.trunc = mul i16 %iv.trunc, %stride.trunc + %idx.ext = mul i64 %iv.ext, %stride.ext + + %gep.trunc = getelementptr i32, ptr %p0, i16 %idx.trunc + %gep.ext = getelementptr i32, ptr %p0, i64 %idx.ext + + %ld.trunc = load i32, ptr %gep.trunc, align 4 + %ld.ext = load i32, ptr %gep.ext, align 4 + + %val = add i32 %ld.trunc, %ld.ext + + %gep.st = getelementptr i32, ptr %p.out, i32 %iv + store i32 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i32 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Check that we don't speculate unit-strided masked memory access if masked wide +; memory operation isn't legal (or that we properly pass the mask if it is). +define void @basic_masked(ptr noalias %p.out, ptr %p, i64 %stride, i64 %x) { +; CHECK-LABEL: VPlan for loop in 'basic_masked' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%c> = icmp sge ir<%iv>, ir<%x> +; CHECK-NEXT: Successor(s): if +; CHECK-EMPTY: +; CHECK-NEXT: if: +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride>, ir<%c> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld>, ir<%c> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st>, ir<%c> +; CHECK-NEXT: Successor(s): latch +; CHECK-EMPTY: +; CHECK-NEXT: latch: +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%c> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = or ir<%c>, vp<[[VP4]]> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128>, vp<[[VP5]]> +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP8]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw 
i64 %iv, 1 +; CHECK-NEXT: IR %c = icmp sge i64 %iv, %x +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] + %iv.next = add nsw i64 %iv, 1 + %c = icmp sge i64 %iv, %x + br i1 %c, label %if, label %latch + +if: + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + br label %latch + +latch: + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; See https://github.com/llvm/llvm-project/issues/162922. +define void @stride_poison(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'stride_poison' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<poison> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, poison +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, poison + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr 
%gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Tests above all used loads, make sure that store is supported too. +define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'basic_strided_store' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%iv> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%idx> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %iv +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %idx +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %iv + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %idx + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; This test shows how/if we scalarize address computation def-chain if that +; pointer has other non-scalar uses. 
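+; A rough C-level equivalent of the pattern below (hypothetical source, for
+; illustration only): the address feeding the load is also stored as data, so
+; `%gep.ld` keeps a vector use even though the load itself is scalar:
+;   for (long i = 0; i < 128; ++i) {
+;     long *q = &p[i * stride];
+;     p_out[i] = *q;     /* strided load through the address chain */
+;     p_ptr_out[i] = q;  /* non-scalar use of the pointer itself   */
+;   }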
+define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'ptr_vec_use' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%gep.ptr.st> = getelementptr ir<%p.ptr.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%gep.ld>, ir<%gep.ptr.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %ld, ptr %gep.st, align 8 +; CHECK-NEXT: IR %gep.ptr.st = getelementptr ptr, ptr %p.ptr.out, i64 %iv +; CHECK-NEXT: IR store ptr %gep.ld, ptr %gep.ptr.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %gep.ptr.st = getelementptr ptr, ptr %p.ptr.out, i64 %iv + store ptr %gep.ld, ptr %gep.ptr.st + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Similar to above, but it's not the resulting pointer itself that has +; non-scalar use but 
something in the middle of its def-chain. +define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'stride_idx_vec_use' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%val> = mul ir<%ld>, ir<%idx> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %idx = mul i64 %iv, %stride +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %val = mul i64 %ld, %idx +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %val = mul i64 %ld, %idx + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Another variation of the above, even longer def-chain. 
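+; Sketched in hypothetical C (for illustration only): the two-instruction
+; address chain `%iv.times.stride` -> `%idx` ends in a value that also has a
+; non-address use:
+;   idx = i * stride + 42;
+;   p_out[i] = p[idx] * idx;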
+define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { +; CHECK-LABEL: VPlan for loop in 'offset_stride_idx_vec_use' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<128> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> +; CHECK-NEXT: EMIT ir<%iv.times.stride> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = add ir<%iv.times.stride>, ir<42> +; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> +; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: EMIT ir<%val> = mul ir<%ld>, ir<%idx> +; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> +; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> +; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<header> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<header>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.next = add nsw i64 %iv, 1 +; CHECK-NEXT: IR %iv.times.stride = mul i64 %iv, %stride +; CHECK-NEXT: IR %idx = add i64 %iv.times.stride, 42 +; CHECK-NEXT: IR %gep.ld = getelementptr i64, ptr %p, i64 %idx +; CHECK-NEXT: IR %ld = load i64, ptr %gep.ld, align 8 +; CHECK-NEXT: IR %val = mul i64 %ld, %idx +; CHECK-NEXT: IR %gep.st = getelementptr i64, ptr %p.out, i64 %iv +; CHECK-NEXT: IR store i64 %val, ptr %gep.st, align 8 +; CHECK-NEXT: IR %exitcond = icmp slt i64 %iv.next, 128 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-EMPTY: +; CHECK-NEXT: remark: <unknown>:0:0: loop not vectorized: value that could not be identified as reduction is used outside the loop +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %iv.times.stride = mul i64 %iv, %stride + %idx = add i64 %iv.times.stride, 42 + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %val = mul i64 %ld, %idx + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret 
void +} + +; No VPlan dump because `%gep.ld` phi doesn't pass legality currently. +define void @base_ptr_induction_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 %stride) { +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %gep.ld = phi ptr [ %p, %entry ], [ %gep.ld.next, %header ] + %iv.next = add nsw i64 %iv, 1 + %gep.ld.next = getelementptr inbounds i64, ptr %gep.ld, i64 %stride + + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %gep.ptr.st = getelementptr ptr, ptr %p.ptr.out, i64 %iv + store ptr %gep.ld, ptr %gep.ptr.st + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { +; CHECK-LABEL: VPlan for loop in 'test_rewrite_iv_scevs' +; CHECK: VPlan ' for UF>=1' { +; CHECK-NEXT: Live-in vp<[[VP0:%[0-9]+]]> = VF +; CHECK-NEXT: Live-in vp<[[VP1:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VP2:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: vp<[[VP3:%[0-9]+]]> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: IR %start.ext = zext i32 %start to i64 +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (100 + (-1 * (zext i32 %start to i64))<nsw>)<nsw> +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEV (zext i32 %start to i64) +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv.0> = WIDEN-INDUCTION ir<%start.ext>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: ir<%iv.1> = WIDEN-INDUCTION ir<0>, vp<[[VP4]]>, vp<[[VP0]]> +; CHECK-NEXT: EMIT ir<%gep.dst> = getelementptr ir<%dst>, ir<%iv.1> +; CHECK-NEXT: EMIT store ir<0.000000e+00>, ir<%gep.dst> +; CHECK-NEXT: EMIT ir<%iv.1.next> = add ir<%iv.1>, vp<[[VP4]]> +; CHECK-NEXT: EMIT ir<%iv.0.next> = add ir<%iv.0>, ir<1> +; CHECK-NEXT: EMIT ir<%ec> = icmp eq ir<%iv.0.next>, ir<100> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv.0> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = exiting-iv-value ir<%iv.1> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<%start.ext>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[VP8]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %iv.0 = phi i64 [ %start.ext, %entry ], [ %iv.0.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ] (extra operand: vp<%bc.resume.val>.1 from scalar.ph) +; CHECK-NEXT: IR %gep.dst = getelementptr float, ptr %dst, i64 %iv.1 +; CHECK-NEXT: IR store float 
0.000000e+00, ptr %gep.dst, align 4 +; CHECK-NEXT: IR %iv.1.next = add i64 %iv.1, %start.ext +; CHECK-NEXT: IR %iv.0.next = add i64 %iv.0, 1 +; CHECK-NEXT: IR %ec = icmp eq i64 %iv.0.next, 100 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + %start.ext = zext i32 %start to i64 + br label %loop + +loop: + %iv.0 = phi i64 [ %start.ext, %entry ], [ %iv.0.next, %loop ] + %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ] + %gep.dst = getelementptr float, ptr %dst, i64 %iv.1 + store float 0.0, ptr %gep.dst, align 4 + %iv.1.next = add i64 %iv.1, %start.ext + %iv.0.next = add i64 %iv.0, 1 + %ec = icmp eq i64 %iv.0.next, 100 + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +; Keep this in sync with the same under LoopVectorize/ diff --git a/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll b/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll new file mode 100644 index 000000000000..6390220e37f4 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll @@ -0,0 +1,4730 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 + +; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -S \ +; RUN: -enable-mem-access-versioning=false 2>&1 | FileCheck %s --check-prefix COMPARE-NO-MV + +; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -S \ +; RUN: -enable-mem-access-versioning=true 2>&1 | FileCheck %s --check-prefix COMPARE-LAA-MV + +define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @basic( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> 
[[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @basic( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + 
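+; The next two tests repeat @basic under optsize/minsize. Note that the
+; COMPARE-LAA-MV output below has no vector.scevcheck stride guard,
+; presumably because runtime checks are not emitted when optimizing for size,
+; so LAA-based multiversioning degrades to the same scalarized gather as the
+; no-MV run.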
+define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { +; COMPARE-NO-MV-LABEL: define void @basic_optsize( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) #[[ATTR0:[0-9]+]] { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @basic_optsize( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) #[[ATTR0:[0-9]+]] { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label 
%[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +attributes #0 = { optsize } + +define void @basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { +; COMPARE-NO-MV-LABEL: define void @basic_minsize( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) #[[ATTR1:[0-9]+]] { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; 
COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @basic_minsize( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) #[[ATTR1:[0-9]+]] { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement 
<4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +attributes #1 = { minsize } + + +; When using byte-gep with wide memop unit-stride wouldn't be one at the geps +; index but rather mem-access-type-size. This test has constant multiplier equal +; to that exact value `8 * %stride` so that `%stride == 1` would result in +; unit-strided load. 
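+; Hypothetical C shape of the loop below (for illustration only):
+;   for (long i = 0; i < 128; ++i)
+;     p_out[i] = *(long *)((char *)p + i * (8 * stride));
+; With `%stride == 1` the byte offset advances by exactly 8 bytes per
+; iteration, matching the i64 access size, i.e. the load becomes
+; unit-strided.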
+define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @byte_gep_scaled_stride( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 3) +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @byte_gep_scaled_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, 
<4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 3) +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 8 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Similar to above but constant multiplier is smaller than load type's width, so +; unit-strideness would require `%stride == 2`. 
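+; (Worked out: the byte offset advances by `4 * %stride` per iteration, and a
+; unit-stride access needs that to equal the 8-byte i64 access size, so
+; `4 * %stride == 8`, i.e. `%stride == 2`.)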
+define void @byte_gep_under_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @byte_gep_under_scaled_stride( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 2) +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @byte_gep_under_scaled_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], 
<4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 2) +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %stride.x8 = mul i64 %stride, 4 + %idx = mul i64 %iv, %stride.x8 + + %gep.ld = getelementptr i8, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Another variation for constant multiplier with byte gep. This time the +; multiplier is bigger than load access type so this cannot be speculated for +; unit-strideness. 
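+; (Here the byte offset advances by `16 * %stride` per iteration;
+; `16 * %stride == 8` has no integer solution, so no runtime stride value can
+; make the i64 access unit-strided.)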
+define void @byte_gep_over_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @byte_gep_over_scaled_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @byte_gep_over_scaled_stride(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]]
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x16 = mul i64 %stride, 16
+ %idx = mul i64 %iv, %stride.x16
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; And another one, with a non-power-of-two multiplier. Cannot be speculated for
+; unit-strideness.
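+; To see why: the i8-typed GEP advances by `%stride * 11` bytes per iteration
+; while the load is 8 bytes wide, so unit-strideness would require
+; `%stride * 11 == 8`, which only a wrap-around value of %stride could ever
+; satisfy. An equality guard on %stride (illustrative only) buys nothing here:
+;   %scaled = mul i64 %stride, 11
+;   %guard = icmp eq i64 %scaled, 8 ; never true for any sensible %stride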
+define void @byte_gep_non_power_of_two_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @byte_gep_non_power_of_two_scaled_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul i64 [[STRIDE]], 11
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @byte_gep_non_power_of_two_scaled_stride(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul i64 [[STRIDE]], 11
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.x11 = mul i64 %stride, 11
+ %idx = mul i64 %iv, %stride.x11
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; No constant multiplier; we need to speculate `%stride == sizeof(load-access-type)`.
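+; An illustrative guard for this case (label names hypothetical, and not what
+; any pass currently emits) would be:
+;   %guard = icmp eq i64 %stride, 8 ; i.e. sizeof(i64)
+;   br i1 %guard, label %vector.ph.unit, label %vector.ph.gather
+; The LAA-based versioning below instead speculates `%stride == 1`, under which
+; the four i64 loads land one byte apart and therefore stay scalarized.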
+define void @byte_gep_nonscaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @byte_gep_nonscaled_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]]
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @byte_gep_nonscaled_stride(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]]
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP16]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i8, ptr [[P]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %idx = mul i64 %iv, %stride
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; If %stride is negated before indexing, that might be a good heuristic to
+; avoid stride speculation... At the very least, keep this test for
+; documentation purposes.
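+; Concretely: with `%idx = %iv * (0 - %stride)`, a contiguous forward access
+; would require `0 - %stride == 8`, i.e. `%stride == -8`. A guard such as
+;   %stride.neg = sub i64 0, %stride
+;   %guard = icmp eq i64 %stride.neg, 8 ; illustrative only
+; seems very unlikely to ever pay off, so not versioning at all is a sensible
+; default here.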
+define void @byte_gep_negated_stride(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @byte_gep_negated_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sub i64 0, [[STRIDE]]
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @byte_gep_negated_stride(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sub i64 0, [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]]
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]]
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]]
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride.neg = sub i64 0, %stride
+ %idx = mul i64 %iv, %stride.neg
+
+ %gep.ld = getelementptr i8, ptr %p, i64 %idx
+ %ld = load i64, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Two memory accesses can be speculated for unit-strideness by a single predicate.
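+; Illustrative shape of the single runtime check that suffices here (this is
+; also what the LAA-based run below emits):
+;   %ident.check = icmp ne i64 %stride, 1
+;   br i1 %ident.check, label %scalar.ph, label %vector.ph
+; Once `%stride == 1` holds, both `%gep.ld0` and `%gep.ld1` are contiguous.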
+define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @shared_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP1]]
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP1]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP5]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP10]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP11]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP12]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP20]], [[TMP28]]
+; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @shared_stride(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP2]], ptr [[TMP3]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8
+; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8
+; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]]
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %idx = mul i64 %iv, %stride
+
+ %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx
+ %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx
+ %ld0 = load i64, ptr %gep.ld0, align 8
+ %ld1 = load i64, ptr %gep.ld1, align 8
+ %val = add i64 %ld0, %ld1
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %val, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Speculating one access for unit-strideness guarantees that the other one isn't.
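+; With strides `%stride` and `%stride + 1`, the guard `%stride == 1` fixes the
+; first access at stride 1 but pins the second one at stride 2, roughly:
+;   if (stride == 1): p0[iv] is contiguous, p1[2 * iv] is a gather
+; which is why the versioned vector body below widens one load but still
+; scalarizes the %p1 loads over `shl <4 x i64> %vec.ind, splat (i64 1)`.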
+define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @dependent_strides(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], splat (i64 1)
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP6]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP6]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP6]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP6]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP7]]
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]]
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP9]]
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP10]]
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP11]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP12]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP13]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP14]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i64> poison, i64 [[TMP19]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i64> [[TMP23]], i64 [[TMP20]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> [[TMP24]], i64 [[TMP21]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP15]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i64, ptr [[TMP16]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP17]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i64> poison, i64 [[TMP27]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i64> [[TMP31]], i64 [[TMP28]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i64> [[TMP32]], i64 [[TMP29]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i64> [[TMP33]], i64 [[TMP30]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = add <4 x i64> [[TMP26]], [[TMP34]]
+; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP35]], ptr [[TMP36]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @dependent_strides(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[VEC_IND]], splat (i64 1)
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P0]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP2]]
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP4]]
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP6]]
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]]
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP3]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP5]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[TMP17]]
+; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[STRIDE1:%.*]] = add i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX0:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[IDX1:%.*]] = mul i64 [[IV]], [[STRIDE1]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX0]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX1]]
+; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8
+; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8
+; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]]
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP16:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride1 = add i64 %stride, 1
+ %idx0 = mul i64 %iv, %stride
+ %idx1 = mul i64 %iv, %stride1
+
+ %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx0
+ %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1
+ %ld0 = load i64, ptr %gep.ld0, align 8
+ %ld1 = load i64, ptr %gep.ld1, align 8
+ %val = add i64 %ld0, %ld1
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %val, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Same as above but in a different order. It might be reasonable to have a
+; heuristic to choose one access over the other instead of just speculating
+; on the first one.
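+; A hypothetical heuristic (nothing below implements it) could score each
+; candidate guard by how many accesses it makes contiguous, instead of always
+; speculating on whichever symbolic stride is recorded first. Note that the
+; LAA-based run below appears to still check `%stride == 1` even here,
+; presumably because `%stride + 1` is not a plain symbolic stride.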
+define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @dependent_strides_reverse_order(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], splat (i64 1)
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP6]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP6]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP6]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP6]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP7]]
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]]
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP9]]
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP10]]
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP11]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP12]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP13]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP14]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i64> poison, i64 [[TMP19]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i64> [[TMP23]], i64 [[TMP20]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> [[TMP24]], i64 [[TMP21]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP15]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i64, ptr [[TMP16]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP17]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i64> poison, i64 [[TMP27]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i64> [[TMP31]], i64 [[TMP28]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i64> [[TMP32]], i64 [[TMP29]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i64> [[TMP33]], i64 [[TMP30]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = add <4 x i64> [[TMP26]], [[TMP34]]
+; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP35]], ptr [[TMP36]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @dependent_strides_reverse_order(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[VEC_IND]], splat (i64 1)
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP1]]
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]]
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]]
+; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP7]]
+; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP2]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP4]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[TMP17]], [[WIDE_LOAD]]
+; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[STRIDE0:%.*]] = add i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX0:%.*]] = mul i64 [[IV]], [[STRIDE0]]
+; COMPARE-LAA-MV-NEXT: [[IDX1:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX0]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX1]]
+; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8
+; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8
+; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]]
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP18:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+ %iv.next = add nsw i64 %iv, 1
+
+ %stride0 = add i64 %stride, 1
+ %idx0 = mul i64 %iv, %stride0
+ %idx1 = mul i64 %iv, %stride
+
+ %gep.ld0 = getelementptr i64, ptr %p0, i64 %idx0
+ %gep.ld1 = getelementptr i64, ptr %p1, i64 %idx1
+ %ld0 = load i64, ptr %gep.ld0, align 8
+ %ld1 = load i64, ptr %gep.ld1, align 8
+ %val = add i64 %ld0, %ld1
+
+ %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+ store i64 %val, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i64 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; Two dependent accesses again, but instead of strides being dependent on each
+; other, we have the same stride but different access sizes.
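+; No single value of %stride makes both accesses contiguous: the i64 load
+; would need `%stride == 8` while the i32 load would need `%stride == 4`.
+; Under a hypothetical `%stride == 8` guard, for instance, the i32 access
+; becomes a stride-two access over i32 elements rather than a contiguous one.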
+define void @byte_dependent_byte_geps(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @byte_dependent_byte_geps( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP20]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> poison, i32 [[TMP21]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i32> [[TMP27]], i32 [[TMP24]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = sext <4 x i32> [[TMP28]] to <4 x i64> +; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = add <4 x i64> [[TMP16]], [[TMP29]] +; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = 
getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP30]], ptr [[TMP31]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @byte_dependent_byte_geps( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P0]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[P1]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP28:%.*]] = sext <4 x i32> 
[[TMP27]] to <4 x i64> +; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP15]], [[TMP28]] +; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP_LD1]], align 8 +; COMPARE-LAA-MV-NEXT: [[LD1_EXT:%.*]] = sext i32 [[LD1]] to i64 +; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1_EXT]] +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP20:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx + %ld0 = load i64, ptr %gep.ld0, align 8 + + %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx + %ld1 = load i32, ptr %gep.ld1, align 8 + %ld1.ext = sext i32 %ld1 to i64 + + %val = add i64 %ld0, %ld1.ext + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Likewise but reverse order of accesses. 
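+; A rough C equivalent for reference (an illustrative sketch, not part of the
+; checked output; `p_out` stands for the IR's `%p.out`):
+;   for (int64_t i = 0; i < 128; ++i) {
+;     int32_t b = *(int32_t *)((char *)p1 + i * stride);
+;     int64_t a = *(int64_t *)((char *)p0 + i * stride);
+;     p_out[i] = a + (int64_t)b;
+;   }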
+define void @byte_dependent_byte_geps_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @byte_dependent_byte_geps_reverse_order( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP15]], i32 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = sext <4 x i32> [[TMP16]] to <4 x i64> +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP20]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP21]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> poison, i64 [[TMP22]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = insertelement <4 x i64> [[TMP28]], i64 [[TMP25]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = add <4 x i64> [[TMP29]], [[TMP17]] +; 
COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP30]], ptr [[TMP31]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @byte_dependent_byte_geps_reverse_order( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P1]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP8]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = sext <4 x i32> [[TMP15]] to <4 x i64> +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[P0]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP18]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP19]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP20]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2 +; COMPARE-LAA-MV-NEXT: 
[[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP28]], [[TMP16]] +; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP_LD1]], align 8 +; COMPARE-LAA-MV-NEXT: [[LD1_EXT:%.*]] = sext i32 [[LD1]] to i64 +; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1_EXT]] +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld1 = getelementptr i8, ptr %p1, i64 %idx + %ld1 = load i32, ptr %gep.ld1, align 8 + %ld1.ext = sext i32 %ld1 to i64 + + %gep.ld0 = getelementptr i8, ptr %p0, i64 %idx + %ld0 = load i64, ptr %gep.ld0, align 8 + + %val = add i64 %ld0, %ld1.ext + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; Interleave group with non-constant stride. Probably doesn't make sense to +; speculate the stride here (as a heuristic).
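+; A rough C equivalent for reference (an illustrative sketch, not part of the
+; checked output; `p_out` stands for the IR's `%p.out`):
+;   for (int64_t i = 0; i < 128; ++i)
+;     p_out[i] = p[i * stride] + p[i * stride + 1];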
+define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @strided_interleave( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i64 1 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[TMP6]], i64 1 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[TMP7]], i64 1 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP8]], i64 1 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP11]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP12]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP20]], [[TMP28]] +; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +;
COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @strided_interleave( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i64 1 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP2]], ptr [[TMP3]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[GEP_LD0]], i64 1 +; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8 +; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]] +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld0 = getelementptr i64, ptr %p, i64 %idx + %gep.ld1 = getelementptr i64, ptr %gep.ld0, i64 1 + %ld0 = load i64, ptr %gep.ld0, align 8 + %ld1 = load i64, ptr %gep.ld1, align 8 + %val = add
i64 %ld0, %ld1 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; Mem access ptr in the form of `{%base,+,%stride}<%header>` where %base is +; something present as an instruction in the loop body (although that would be a +; SCEV expression, not a SCEVUnknown). +define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) { +; COMPARE-NO-MV-LABEL: define void @in_loop_base( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void
@in_loop_base( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[MUL:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = add i64 [[MUL]], [[OFFSET]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD_BASE:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[GEP_LD_BASE]], i64 [[MUL]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %offset + + %gep.ld.base = getelementptr i64, ptr %p, i64 %offset + %gep.ld = getelementptr i64, ptr %gep.ld.base, i64 %mul + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Base (non-recurrent) part of the memory access pointer doesn't have a +; corresponding IR value, e.g., `(base + %iv*%stride) + %offset`.
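+; A rough C equivalent for reference (an illustrative sketch, not part of the
+; checked output): the whole index feeds a single GEP, so `%p + %offset` never
+; exists as a separate IR value:
+;   for (int64_t i = 0; i < 128; ++i)
+;     p_out[i] = p[i * stride + offset];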
+define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) { +; COMPARE-NO-MV-LABEL: define void @base_not_in_ir( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[OFFSET]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add <4 x i64> [[TMP0]], [[BROADCAST_SPLAT2]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @base_not_in_ir( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; 
COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], [[OFFSET]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[MUL:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = add i64 [[MUL]], [[OFFSET]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP28:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %offset + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; If the base pointer is uniform but not loop-invariant, we could still speculate +; the access to be unit-strided, although this is not implemented yet.
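+; A rough C equivalent for reference (an illustrative sketch, not part of the
+; checked output): the `i / 32` term is uniform within a VF=4 iteration group
+; but not loop-invariant:
+;   for (int64_t i = 0; i < 128; ++i)
+;     p_out[i] = p[i * stride + i / 32];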
+define void @non_invariant_uniform_base(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @non_invariant_uniform_base( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sdiv <4 x i64> [[VEC_IND]], splat (i64 32) +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[TMP1]], [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP2]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP2]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP2]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP2]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP6]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> poison, i64 [[TMP11]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @non_invariant_uniform_base( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sdiv <4 x i64> [[VEC_IND]], splat (i64 32) +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[TMP1]], [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP2]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP2]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP2]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP2]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP6]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> poison, i64 [[TMP11]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %iv.sdiv32 = sdiv i64 %iv, 32 + + %mul = mul i64 %iv, %stride + %idx = add i64 %mul, %iv.sdiv32 + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @non_invariant_uniform_stride(ptr noalias %p.out, ptr %p, ptr %p.uni) { +; COMPARE-NO-MV-LABEL: define void @non_invariant_uniform_stride( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], ptr [[P_UNI:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: 
[[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sdiv i64 [[INDEX]], 32 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_UNI]], i64 [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP2]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP3]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP3]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP3]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP3]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP6]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP11]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> poison, i64 [[TMP12]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP19]], ptr [[TMP20]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @non_invariant_uniform_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], ptr [[P_UNI:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sdiv i64 [[INDEX]], 
32 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_UNI]], i64 [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP2]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP3]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP3]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP3]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP3]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP6]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP11]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> poison, i64 [[TMP12]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP19]], ptr [[TMP20]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %iv.sdiv32 = sdiv i64 %iv, 32 + + %gep.uni = getelementptr i64, ptr %p.uni, i64 %iv.sdiv32 + %stride = load i64, ptr %gep.uni + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Not valuable by itself, but as a basis for the subsequent test to ensure that +; non-constant trip count doesn't change anything by itself. Also show the order +; of checks between stride speculation and trip-count check when not +; tail-folding. 
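+; A rough C equivalent for reference (an illustrative sketch, not part of the
+; checked output): the loop is bottom-tested, which is why the checks below
+; compute the trip count as `smax(%n, 1)`:
+;   int64_t i = 0;
+;   do {
+;     p_out[i] = p[i * stride];
+;     ++i;
+;   } while (i < n);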
+define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { +; COMPARE-NO-MV-LABEL: define void @non_constant_btc( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[N:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: br label 
%[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @non_constant_btc( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[N:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br 
i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC == stride, so stride speculation would result in zero vector loop +; iterations. +define void @stride_as_btc(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @stride_as_btc( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[STRIDE]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 
[[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_as_btc( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %stride + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Similar to the above, but with a slightly more complex dependency between +; stride and BTC.
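+; Here %n = %stride + 1, so under the speculated %stride == 1 the loop would
+; run only two iterations, below the VF of 4; accordingly, the LAA-based
+; checks below show the loop staying fully scalar.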
+define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @stride_dependent_btc( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, 
%[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_dependent_btc( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %n = add i64 %stride, 1 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC depends on %stride, but stride speculation doesn't necessarily mean zero +; vector loop iterations. The test shows in which order we emit the run-time +; checks for both.
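+; Here %n = %m * %stride, so with %stride speculated to 1 the trip count
+; simplifies to smax(%m, 1); note in the LAA-based checks below that the
+; minimum-iterations check on that value is emitted before the stride identity
+; check in vector.scevcheck.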
+define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i64 %m) { +; COMPARE-NO-MV-LABEL: define void @stride_btc_checks_order( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[M:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[N:%.*]] = mul i64 [[M]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], 
%[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP28:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_btc_checks_order( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[M:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-LAA-MV-NEXT: [[N:%.*]] = mul i64 [[M]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: [[SMAX1:%.*]] = call i64 @llvm.smax.i64(i64 [[M]], i64 1) +; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX1]], 4 +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX1]], 4 +; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX1]], [[N_MOD_VF]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX1]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: 
[[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP34:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %n = mul i64 %m, %stride + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; BTC fully defined by stride speculation but still allows vector loop execution. +define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @stride_dependent_btc_non_preventive( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x 
i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP30:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_dependent_btc_non_preventive( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8 +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[P_OUT]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp 
slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP35:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Doesn't pass legality, as the run-time memory dependence check doesn't support +; strided accesses. If it did, the purpose of this test would be to show how all +; three run-time checks (mem deps/stride speculation/trip-count) would be +; ordered with respect to each other. Note that the pointer aliasing check could +; potentially be simplified if done after stride speculation. However, that +; isn't necessarily the best idea, because we could also multi-version for stride +; and keep the aliasing part generic and shared by both vector loops. +define void @stride_btc_memdep_triple_check(ptr %p, i64 %stride, i64 %out.offset) { +; COMPARE-NO-MV-LABEL: define void @stride_btc_memdep_triple_check( +; COMPARE-NO-MV-SAME: ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OUT_OFFSET:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P]], i64 [[OUT_OFFSET]] +; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_btc_memdep_triple_check( +; COMPARE-LAA-MV-SAME: ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OUT_OFFSET:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P]], i64 [[OUT_OFFSET]] +; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_MEMCHECK]]: +; COMPARE-LAA-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 +; COMPARE-LAA-MV-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[P_OUT]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP36:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %p.out = getelementptr i8, ptr %p, i64 %out.offset + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Same as above but memdep check doesn't depend on stride +define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, i64 %stride, i64 %out.offset) { +; COMPARE-NO-MV-LABEL: define void @stride_btc_independent_memdep_triple_check( +; COMPARE-NO-MV-SAME: ptr [[P:%.*]], ptr noalias [[P2:%.*]], i64 [[STRIDE:%.*]], i64 [[OUT_OFFSET:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P2]], i64 [[OUT_OFFSET]] +; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; COMPARE-NO-MV: [[VECTOR_MEMCHECK]]: +; COMPARE-NO-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 +; COMPARE-NO-MV-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; 
COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P2]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[TMP16]], [[WIDE_LOAD]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_LD2:%.*]] = getelementptr i64, ptr [[P2]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: [[LD2:%.*]] = load i64, ptr [[GEP_LD2]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i64 [[LD]], [[LD2]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; 
COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_btc_independent_memdep_triple_check( +; COMPARE-LAA-MV-SAME: ptr [[P:%.*]], ptr noalias [[P2:%.*]], i64 [[STRIDE:%.*]], i64 [[OUT_OFFSET:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P2]], i64 [[OUT_OFFSET]] +; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_MEMCHECK]]: +; COMPARE-LAA-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 +; COMPARE-LAA-MV-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[P2]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP0]], ptr [[P_OUT]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_LD2:%.*]] = getelementptr i64, ptr [[P2]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: [[LD2:%.*]] = load i64, ptr [[GEP_LD2]], align 8 +; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD]], [[LD2]] +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP37:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %p.out = getelementptr i8, ptr %p2, i64 %out.offset + %n = add i64 %stride, 3 + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.ld2 = getelementptr i64, ptr %p2, i64 %iv + %ld2 = load i64, ptr %gep.ld2, align 8 + + %val = add i64 %ld, %ld2 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, %n + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +define void @actual_stride_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define 
void @actual_stride_not_in_ir( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[TMP6]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[TMP7]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP8]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP11]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP12]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP20]], ptr [[TMP21]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @actual_stride_not_in_ir( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = 
insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[TMP6]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[TMP7]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP8]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP11]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP12]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP20]], ptr [[TMP21]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %base = getelementptr i64, ptr %p, i64 %idx + %gep.ld = getelementptr i64, ptr %base, i64 %idx + + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Gep into multi-dimensional array. 
Strided last index can be speculated to +; result in a unit-strided memory access. +define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @nd_array_last_idx( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @nd_array_last_idx( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: 
br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP40:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 42, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + + +; A strided inner index will never result in a unit-strided memory access, even +; if its stride is one.
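+; A unit step in the non-last index of [256 x [256 x i64]] still advances the
+; address by a whole row of 256 i64s (2048 bytes), so speculating the stride
+; cannot make the access consecutive; note below that the LAA-based version
+; still emits the stride check in vector.scevcheck without gaining a wide load
+; from it.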
+define void @nd_array_non_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @nd_array_non_last_idx( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP1]], i64 42 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP2]], i64 42 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP3]], i64 42 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP4]], i64 42 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @nd_array_non_last_idx( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label 
%[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[INDEX]], i64 42 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP1]], i64 42 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP2]], i64 42 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP3]], i64 42 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[IDX]], i64 42 +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP42:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 42 + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Isn't unit-strided either. 
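+; With %idx used for both indices, a stride of 1 would still advance the
+; address by 257 elements (one row plus one element) per iteration, so neither
+; RUN configuration versions on the stride here.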
+define void @nd_array_multiple_idxs(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @nd_array_multiple_idxs( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP1]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP2]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP3]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP4]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @nd_array_multiple_idxs( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP1]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP2]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP3]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP4]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr [256 x [256 x i64]], ptr %p, i64 1, i64 %idx, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Stride is used through `sext` in the loop. 
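+; Since sext(%stride.i32) == 1 iff %stride.i32 == 1, the versioning check can
+; be done on the narrow i32 value directly; once it passes, the access is a
+; plain unit-stride wide load.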
+define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { +; COMPARE-NO-MV-LABEL: define void @sext_stride( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i32 [[STRIDE_I32:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STRIDE_I32]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sext <4 x i32> [[BROADCAST_SPLAT]] to <4 x i64> +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @sext_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i32 [[STRIDE_I32:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STRIDE_I32]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; 
COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[STRIDE:%.*]] = sext i32 [[STRIDE_I32]] to i64 +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP45:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %stride = sext i32 %stride.i32 to i64 + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Stride is used through `trunc` in the loop. 
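+; Note that the LAA-based check below compares the full i64 value against 1,
+; which is conservative: a value such as 2^32 + 1 also truncates to a unit
+; stride but still takes the scalar fallback. Without multiversioning, a
+; umul.with.overflow SCEV check on the narrow i32 index is needed instead.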
+define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { +; COMPARE-NO-MV-LABEL: define void @trunc_stride( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE_I64:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-NO-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = trunc i64 [[STRIDE_I64]] to i32 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP0]], 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[TMP0]] +; COMPARE-NO-MV-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 127) +; COMPARE-NO-MV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 +; COMPARE-NO-MV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = sub i32 0, [[MUL_RESULT]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = icmp slt i32 [[MUL_RESULT]], 0 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 0 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE_I64]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[BROADCAST_SPLAT]] to <4 x i32> +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = mul <4 x i32> [[VEC_IND]], [[TMP9]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP10]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[TMP10]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP11]] +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP12]] +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP13]] +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP14]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP15]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP23]], i32 [[TMP20]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; 
COMPARE-NO-MV-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i32> [[VEC_IND]], splat (i32 4) +; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[STRIDE:%.*]] = trunc i64 [[STRIDE_I64]] to i32 +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i32, ptr [[P]], i32 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i32, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] +; COMPARE-NO-MV-NEXT: store i32 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP39:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @trunc_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE_I64:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE_I64]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P]], i32 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[STRIDE:%.*]] = trunc i64 [[STRIDE_I64]] to i32 +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i32, ptr [[P]], i32 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i32, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] +; COMPARE-LAA-MV-NEXT: store i32 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: 
[[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP47:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+ br label %header
+
+header:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ]
+ %stride = trunc i64 %stride.i64 to i32
+ %iv.next = add nsw i32 %iv, 1
+
+ %idx = mul i32 %iv, %stride
+
+ %gep.ld = getelementptr i32, ptr %p, i32 %idx
+ %ld = load i32, ptr %gep.ld, align 8
+
+ %gep.st = getelementptr i32, ptr %p.out, i32 %iv
+ store i32 %ld, ptr %gep.st, align 8
+
+ %exitcond = icmp slt i32 %iv.next, 128
+ br i1 %exitcond, label %header, label %exit
+
+exit:
+ ret void
+}
+
+; %stride is used through both `trunc`/`sext` for different accesses.
+define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) {
+; COMPARE-NO-MV-LABEL: define void @trunc_ext_stride(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i32 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: [[STRIDE_TRUNC:%.*]] = trunc i32 [[STRIDE]] to i16
+; COMPARE-NO-MV-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-NO-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sub i16 0, [[STRIDE_TRUNC]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = icmp slt i16 [[STRIDE_TRUNC]], 0
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 [[TMP0]], i16 [[STRIDE_TRUNC]]
+; COMPARE-NO-MV-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 [[TMP2]], i16 127)
+; COMPARE-NO-MV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0
+; COMPARE-NO-MV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = sub i16 0, [[MUL_RESULT]]
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = icmp slt i16 [[MUL_RESULT]], 0
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = icmp sgt i16 [[TMP3]], 0
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = select i1 [[TMP1]], i1 [[TMP5]], i1 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]]
+; COMPARE-NO-MV-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[STRIDE_TRUNC]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE_EXT]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND3:%.*]] = phi <4 x i16> [ <i16 0, i16 1, i16 2, i16 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT4:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = sext <4 x i32> [[VEC_IND]] to <4 x i64>
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = mul <4 x i16> [[VEC_IND3]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0
+;
COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = mul <4 x i64> [[TMP8]], [[BROADCAST_SPLAT2]] +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = extractelement <4 x i64> [[TMP14]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP14]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = extractelement <4 x i64> [[TMP14]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP14]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP10]] +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP11]] +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP12]] +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP13]] +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP15]] +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP16]] +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP17]] +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP18]] +; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP19]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP20]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP21]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP22]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> poison, i32 [[TMP27]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> [[TMP31]], i32 [[TMP28]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP23]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP24]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP25]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP26]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> poison, i32 [[TMP35]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP36]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP37]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP42:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP38]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP43:%.*]] = add <4 x i32> [[TMP34]], [[TMP42]] +; COMPARE-NO-MV-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP44]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i32> [[VEC_IND]], splat (i32 4) +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT4]] = add <4 x i16> [[VEC_IND3]], splat (i16 4) +; COMPARE-NO-MV-NEXT: [[TMP45:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP45]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, 
%[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IV_TRUNC:%.*]] = trunc i32 [[IV]] to i16 +; COMPARE-NO-MV-NEXT: [[IV_EXT:%.*]] = sext i32 [[IV]] to i64 +; COMPARE-NO-MV-NEXT: [[IDX_TRUNC:%.*]] = mul i16 [[IV_TRUNC]], [[STRIDE_TRUNC]] +; COMPARE-NO-MV-NEXT: [[IDX_EXT:%.*]] = mul i64 [[IV_EXT]], [[STRIDE_EXT]] +; COMPARE-NO-MV-NEXT: [[GEP_TRUNC:%.*]] = getelementptr i32, ptr [[P0]], i16 [[IDX_TRUNC]] +; COMPARE-NO-MV-NEXT: [[GEP_EXT:%.*]] = getelementptr i32, ptr [[P0]], i64 [[IDX_EXT]] +; COMPARE-NO-MV-NEXT: [[LD_TRUNC:%.*]] = load i32, ptr [[GEP_TRUNC]], align 4 +; COMPARE-NO-MV-NEXT: [[LD_EXT:%.*]] = load i32, ptr [[GEP_EXT]], align 4 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i32 [[LD_TRUNC]], [[LD_EXT]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] +; COMPARE-NO-MV-NEXT: store i32 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP41:![0-9]+]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @trunc_ext_stride( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i32 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: [[STRIDE_TRUNC:%.*]] = trunc i32 [[STRIDE]] to i16 +; COMPARE-LAA-MV-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64 +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = trunc i32 [[INDEX]] to i16 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = sext i32 [[INDEX]] to i64 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = mul i16 [[TMP0]], [[STRIDE_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IV_TRUNC:%.*]] = trunc i32 [[IV]] to i16 +; COMPARE-LAA-MV-NEXT: [[IV_EXT:%.*]] = sext i32 
[[IV]] to i64 +; COMPARE-LAA-MV-NEXT: [[IDX_TRUNC:%.*]] = mul i16 [[IV_TRUNC]], [[STRIDE_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[IDX_EXT:%.*]] = mul i64 [[IV_EXT]], [[STRIDE_EXT]] +; COMPARE-LAA-MV-NEXT: [[GEP_TRUNC:%.*]] = getelementptr i32, ptr [[P0]], i16 [[IDX_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[GEP_EXT:%.*]] = getelementptr i32, ptr [[P0]], i64 [[IDX_EXT]] +; COMPARE-LAA-MV-NEXT: [[LD_TRUNC:%.*]] = load i32, ptr [[GEP_TRUNC]], align 4 +; COMPARE-LAA-MV-NEXT: [[LD_EXT:%.*]] = load i32, ptr [[GEP_EXT]], align 4 +; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i32 [[LD_TRUNC]], [[LD_EXT]] +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] +; COMPARE-LAA-MV-NEXT: store i32 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP49:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + %stride.trunc = trunc i32 %stride to i16 + %stride.ext = sext i32 %stride to i64 + br label %header + +header: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i32 %iv, 1 + + %iv.trunc = trunc i32 %iv to i16 + %iv.ext = sext i32 %iv to i64 + + %idx.trunc = mul i16 %iv.trunc, %stride.trunc + %idx.ext = mul i64 %iv.ext, %stride.ext + + %gep.trunc = getelementptr i32, ptr %p0, i16 %idx.trunc + %gep.ext = getelementptr i32, ptr %p0, i64 %idx.ext + + %ld.trunc = load i32, ptr %gep.trunc, align 4 + %ld.ext = load i32, ptr %gep.ext, align 4 + + %val = add i32 %ld.trunc, %ld.ext + + %gep.st = getelementptr i32, ptr %p.out, i32 %iv + store i32 %val, ptr %gep.st, align 8 + + %exitcond = icmp slt i32 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Check that we don't speculate unit-strided masked memory access if masked wide +; memory operation isn't legal (or that we properly pass the mask if it is). 
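+; With the default cost model used here both configurations fall back to
+; per-lane predication (pred.store.if/continue blocks); versioning merely
+; simplifies the address computation inside each predicated block rather
+; than enabling a masked wide load/store.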
+define void @basic_masked(ptr noalias %p.out, ptr %p, i64 %stride, i64 %x) { +; COMPARE-NO-MV-LABEL: define void @basic_masked( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[X:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[X]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE8:.*]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE8]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = icmp sge <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; COMPARE-NO-MV: [[PRED_STORE_IF]]: +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP6]] +; COMPARE-NO-MV-NEXT: store i64 [[TMP5]], ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[PRED_STORE_CONTINUE]] +; COMPARE-NO-MV: [[PRED_STORE_CONTINUE]]: +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]] +; COMPARE-NO-MV: [[PRED_STORE_IF3]]: +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP12]] +; COMPARE-NO-MV-NEXT: store i64 [[TMP11]], ptr [[TMP13]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; COMPARE-NO-MV: [[PRED_STORE_CONTINUE4]]: +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]] +; COMPARE-NO-MV: [[PRED_STORE_IF5]]: +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP15]] +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = load i64, ptr [[TMP16]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP18]] +; COMPARE-NO-MV-NEXT: 
store i64 [[TMP17]], ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[PRED_STORE_CONTINUE6]] +; COMPARE-NO-MV: [[PRED_STORE_CONTINUE6]]: +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8]] +; COMPARE-NO-MV: [[PRED_STORE_IF7]]: +; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP21]] +; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP22]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP24]] +; COMPARE-NO-MV-NEXT: store i64 [[TMP23]], ptr [[TMP25]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; COMPARE-NO-MV: [[PRED_STORE_CONTINUE8]]: +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @basic_masked( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[X:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[X]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = icmp sge <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; COMPARE-LAA-MV: [[PRED_STORE_IF]]: +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP4]], ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE]] +; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE]]: +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP6]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]] +; COMPARE-LAA-MV: [[PRED_STORE_IF1]]: +; COMPARE-LAA-MV-NEXT: 
[[TMP7:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP7]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP9]], ptr [[TMP10]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE2]] +; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE2]]: +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP11]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]] +; COMPARE-LAA-MV: [[PRED_STORE_IF3]]: +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP12]] +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP12]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP14]], ptr [[TMP15]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE4]] +; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE4]]: +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6]] +; COMPARE-LAA-MV: [[PRED_STORE_IF5]]: +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP17]] +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP18]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP17]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE6]] +; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE6]]: +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP50:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[C:%.*]] = icmp sge i64 [[IV]], [[X]] +; COMPARE-LAA-MV-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[LATCH]] +; COMPARE-LAA-MV: [[IF]]: +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[LATCH]] +; COMPARE-LAA-MV: [[LATCH]]: +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP51:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] + %iv.next = add nsw i64 %iv, 1 + %c = icmp sge i64 
%iv, %x + br i1 %c, label %if, label %latch + +if: + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + br label %latch + +latch: + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; See https://github.com/llvm/llvm-project/issues/162922. +define void @stride_poison(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @stride_poison( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], poison +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @stride_poison( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi 
i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], poison +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, poison + + %gep.ld = getelementptr i64, ptr %p, i64 %idx + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %iv + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; Tests above all used loads, make sure that store is supported too. 
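+; Under LAA versioning the scatter-like store collapses into a contiguous
+; wide store; the load side is already unit-strided through %iv.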
+define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @basic_strided_store( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: store i64 [[TMP6]], ptr [[TMP10]], align 8 +; COMPARE-NO-MV-NEXT: store i64 [[TMP7]], ptr [[TMP11]], align 8 +; COMPARE-NO-MV-NEXT: store i64 [[TMP8]], ptr [[TMP12]], align 8 +; COMPARE-NO-MV-NEXT: store i64 [[TMP9]], ptr [[TMP13]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @basic_strided_store( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = 
phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP53:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP54:![0-9]+]] +; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: ret void +; +entry: + br label %header + +header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ] + %iv.next = add nsw i64 %iv, 1 + + %idx = mul i64 %iv, %stride + + %gep.ld = getelementptr i64, ptr %p, i64 %iv + %ld = load i64, ptr %gep.ld, align 8 + + %gep.st = getelementptr i64, ptr %p.out, i64 %idx + store i64 %ld, ptr %gep.st, align 8 + + %exitcond = icmp slt i64 %iv.next, 128 + br i1 %exitcond, label %header, label %exit + +exit: + ret void +} + +; This test shows how/if we scalarize address computation def-chain if that +; pointer has other non-scalar uses. 
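+; The <4 x ptr> GEP has to stay in vector form because it is stored to
+; %p.ptr.out; after versioning the load is expected to go through lane 0 of
+; that vector as a contiguous wide load, while the pointer vector itself is
+; still materialized for the pointer store.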
+define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 %stride) { +; COMPARE-NO-MV-LABEL: define void @ptr_vec_use( +; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV: [[VECTOR_PH]]: +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-NO-MV: [[VECTOR_BODY]]: +; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP3]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = insertelement <4 x i64> poison, i64 [[TMP6]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP7]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> [[TMP11]], i64 [[TMP8]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP13]], ptr [[TMP14]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP15]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]] +; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: +; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: ret void +; +; COMPARE-LAA-MV-LABEL: define void @ptr_vec_use( +; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: 
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[VEC_IND]]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x ptr> [[TMP0]], i32 0
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x ptr> [[TMP0]], ptr [[TMP3]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP55:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[GEP_PTR_ST:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store ptr [[GEP_LD]], ptr [[GEP_PTR_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP56:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+  br label %header
+
+header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+  %iv.next = add nsw i64 %iv, 1
+
+  %idx = mul i64 %iv, %stride
+
+  %gep.ld = getelementptr i64, ptr %p, i64 %idx
+  %ld = load i64, ptr %gep.ld, align 8
+
+  %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+  store i64 %ld, ptr %gep.st, align 8
+
+  %gep.ptr.st = getelementptr ptr, ptr %p.ptr.out, i64 %iv
+  store ptr %gep.ld, ptr %gep.ptr.st
+
+  %exitcond = icmp slt i64 %iv.next, 128
+  br i1 %exitcond, label %header, label %exit
+
+exit:
+  ret void
+}
+
+; Similar to the above, but here it is not the resulting pointer itself that
+; has a non-scalar use; instead, a value in the middle of its def-chain (the
+; strided index %idx) does.
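+; For reference, the relevant scalar chain (copied from the IR below) is:
+;   %idx    = mul i64 %iv, %stride               ; strided index
+;   %gep.ld = getelementptr i64, ptr %p, i64 %idx
+;   %val    = mul i64 %ld, %idx                  ; non-address use of %idx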
+define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @stride_idx_vec_use(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]]
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = mul <4 x i64> [[TMP16]], [[TMP0]]
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @stride_idx_vec_use(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[WIDE_LOAD]], [[VEC_IND]]
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP1]], ptr [[TMP2]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP57:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = mul i64 [[LD]], [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP58:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+  br label %header
+
+header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+  %iv.next = add nsw i64 %iv, 1
+
+  %idx = mul i64 %iv, %stride
+
+  %gep.ld = getelementptr i64, ptr %p, i64 %idx
+  %ld = load i64, ptr %gep.ld, align 8
+
+  %val = mul i64 %ld, %idx
+
+  %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+  store i64 %val, ptr %gep.st, align 8
+
+  %exitcond = icmp slt i64 %iv.next, 128
+  br i1 %exitcond, label %header, label %exit
+
+exit:
+  ret void
+}
+
+; Another variation of the above, with an even longer def-chain.
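+; For reference, the def-chain (copied from the IR below) is:
+;   %iv.times.stride = mul i64 %iv, %stride
+;   %idx             = add i64 %iv.times.stride, 42
+;   %gep.ld          = getelementptr i64, ptr %p, i64 %idx
+;   %val             = mul i64 %ld, %idx         ; non-address use of %idx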
+define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @offset_stride_idx_vec_use(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0
+; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add <4 x i64> [[TMP0]], splat (i64 42)
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1
+; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2
+; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3
+; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = mul <4 x i64> [[TMP17]], [[TMP1]]
+; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @offset_stride_idx_vec_use(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[VEC_IND]], splat (i64 42)
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]]
+; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
+; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = mul <4 x i64> [[WIDE_LOAD]], [[TMP0]]
+; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP3]], ptr [[TMP4]], align 8
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4)
+; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP59:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[IV_TIMES_STRIDE:%.*]] = mul i64 [[IV]], [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = add i64 [[IV_TIMES_STRIDE]], 42
+; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = mul i64 [[LD]], [[IDX]]
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP60:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+  br label %header
+
+header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+  %iv.next = add nsw i64 %iv, 1
+
+  %iv.times.stride = mul i64 %iv, %stride
+  %idx = add i64 %iv.times.stride, 42
+
+  %gep.ld = getelementptr i64, ptr %p, i64 %idx
+  %ld = load i64, ptr %gep.ld, align 8
+
+  %val = mul i64 %ld, %idx
+
+  %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+  store i64 %val, ptr %gep.st, align 8
+
+  %exitcond = icmp slt i64 %iv.next, 128
+  br i1 %exitcond, label %header, label %exit
+
+exit:
+  ret void
+}
+
+; No VPlan dump because the `%gep.ld` phi doesn't currently pass legality.
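+; For reference, the pointer induction (copied from the IR below) is:
+;   %gep.ld      = phi ptr [ %p, %entry ], [ %gep.ld.next, %header ]
+;   %gep.ld.next = getelementptr inbounds i64, ptr %gep.ld, i64 %stride
+; and %gep.ld itself is also stored to memory, i.e. it has a non-address use.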
+define void @base_ptr_induction_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 %stride) {
+; COMPARE-NO-MV-LABEL: define void @base_ptr_induction_vec_use(
+; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]:
+; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-NO-MV: [[HEADER]]:
+; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = phi ptr [ [[P]], %[[ENTRY]] ], [ [[GEP_LD_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-NO-MV-NEXT: [[GEP_LD_NEXT]] = getelementptr inbounds i64, ptr [[GEP_LD]], i64 [[STRIDE]]
+; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8
+; COMPARE-NO-MV-NEXT: [[GEP_PTR_ST:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[IV]]
+; COMPARE-NO-MV-NEXT: store ptr [[GEP_LD]], ptr [[GEP_PTR_ST]], align 8
+; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @base_ptr_induction_vec_use(
+; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]:
+; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]]
+; COMPARE-LAA-MV: [[HEADER]]:
+; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = phi ptr [ [[P]], %[[ENTRY]] ], [ [[GEP_LD_NEXT:%.*]], %[[HEADER]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; COMPARE-LAA-MV-NEXT: [[GEP_LD_NEXT]] = getelementptr inbounds i64, ptr [[GEP_LD]], i64 [[STRIDE]]
+; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8
+; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[GEP_PTR_ST:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[IV]]
+; COMPARE-LAA-MV-NEXT: store ptr [[GEP_LD]], ptr [[GEP_PTR_ST]], align 8
+; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128
+; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+  br label %header
+
+header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %header ]
+  %gep.ld = phi ptr [ %p, %entry ], [ %gep.ld.next, %header ]
+  %iv.next = add nsw i64 %iv, 1
+  %gep.ld.next = getelementptr inbounds i64, ptr %gep.ld, i64 %stride
+
+  %ld = load i64, ptr %gep.ld, align 8
+
+  %gep.st = getelementptr i64, ptr %p.out, i64 %iv
+  store i64 %ld, ptr %gep.st, align 8
+
+  %gep.ptr.st = getelementptr ptr, ptr %p.ptr.out, i64 %iv
+  store ptr %gep.ld, ptr %gep.ptr.st
+
+  %exitcond = icmp slt i64 %iv.next, 128
+  br i1 %exitcond, label %header, label %exit
+
+exit:
+  ret void
+}
+
+define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) {
+; COMPARE-NO-MV-LABEL: define void @test_rewrite_iv_scevs(
+; COMPARE-NO-MV-SAME: i32 [[START:%.*]], ptr [[DST:%.*]]) {
+; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]:
+; COMPARE-NO-MV-NEXT: [[START_EXT:%.*]] = zext i32 [[START]] to i64
+; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sub i64 100, [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
+; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-NO-MV: [[VECTOR_PH]]:
+; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
+; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add i64 [[START_EXT]], [[N_VEC]]
+; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], [[START_EXT]]
+; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-NO-MV: [[VECTOR_BODY]]:
+; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-NO-MV-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = mul i64 1, [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], [[TMP5]]
+; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = mul i64 2, [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], [[TMP7]]
+; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = mul i64 3, [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], [[TMP9]]
+; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[DST]], i64 [[OFFSET_IDX]]
+; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP6]]
+; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP8]]
+; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP10]]
+; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP11]], align 4
+; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP12]], align 4
+; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP13]], align 4
+; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP14]], align 4
+; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; COMPARE-NO-MV-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]]
+; COMPARE-NO-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; COMPARE-NO-MV: [[SCALAR_PH]]:
+; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[ENTRY]] ]
+; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; COMPARE-NO-MV-NEXT: br label %[[LOOP:.*]]
+; COMPARE-NO-MV: [[LOOP]]:
+; COMPARE-NO-MV-NEXT: [[IV_0:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ]
+; COMPARE-NO-MV-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; COMPARE-NO-MV-NEXT: [[GEP_DST:%.*]] = getelementptr float, ptr [[DST]], i64 [[IV_1]]
+; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[GEP_DST]], align 4
+; COMPARE-NO-MV-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], [[START_EXT]]
+; COMPARE-NO-MV-NEXT: [[IV_0_NEXT]] = add i64 [[IV_0]], 1
+; COMPARE-NO-MV-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_0_NEXT]], 100
+; COMPARE-NO-MV-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP49:![0-9]+]]
+; COMPARE-NO-MV: [[EXIT]]:
+; COMPARE-NO-MV-NEXT: ret void
+;
+; COMPARE-LAA-MV-LABEL: define void @test_rewrite_iv_scevs(
+; COMPARE-LAA-MV-SAME: i32 [[START:%.*]], ptr [[DST:%.*]]) {
+; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]]
+; COMPARE-LAA-MV-NEXT: [[START_EXT:%.*]] = zext i32 [[START]] to i64
+; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sub i64 100, [[START_EXT]]
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
+; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]:
+; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[START]], 1
+; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; COMPARE-LAA-MV: [[VECTOR_PH]]:
+; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMPARE-LAA-MV: [[VECTOR_BODY]]:
+; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[DST]], i64 [[INDEX]]
+; COMPARE-LAA-MV-NEXT: store <4 x float> zeroinitializer, ptr [[TMP1]], align 4
+; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
+; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP61:![0-9]+]]
+; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]:
+; COMPARE-LAA-MV-NEXT: br label %[[SCALAR_PH]]
+; COMPARE-LAA-MV: [[SCALAR_PH]]:
+; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 97, %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[VECTOR_SCEVCHECK]] ]
+; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 96, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; COMPARE-LAA-MV-NEXT: br label %[[LOOP:.*]]
+; COMPARE-LAA-MV: [[LOOP]]:
+; COMPARE-LAA-MV-NEXT: [[IV_0:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ]
+; COMPARE-LAA-MV-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; COMPARE-LAA-MV-NEXT: [[GEP_DST:%.*]] = getelementptr float, ptr [[DST]], i64 [[IV_1]]
+; COMPARE-LAA-MV-NEXT: store float 0.000000e+00, ptr [[GEP_DST]], align 4
+; COMPARE-LAA-MV-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], [[START_EXT]]
+; COMPARE-LAA-MV-NEXT: [[IV_0_NEXT]] = add i64 [[IV_0]], 1
+; COMPARE-LAA-MV-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_0_NEXT]], 100
+; COMPARE-LAA-MV-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP62:![0-9]+]]
+; COMPARE-LAA-MV: [[EXIT]]:
+; COMPARE-LAA-MV-NEXT: ret void
+;
+entry:
+  %start.ext = zext i32 %start to i64
+  br label %loop
+
+loop:
+  %iv.0 = phi i64 [ %start.ext, %entry ], [ %iv.0.next, %loop ]
+  %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ]
+  %gep.dst = getelementptr float, ptr %dst, i64 %iv.1
+  store float 0.0, ptr %gep.dst, align 4
+  %iv.1.next = add i64 %iv.1, %start.ext
+  %iv.0.next = add i64 %iv.0, 1
+  %ec = icmp eq i64 %iv.0.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Keep this file in sync with its counterpart under VPlan/.
