Diffstat (limited to 'llvm/test/Transforms')
11 files changed, 797 insertions, 328 deletions
diff --git a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll index b26320b..6fbe960 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll @@ -6,9 +6,10 @@ declare ptr @memchr(ptr, i32, i64) -define i1 @test_memchr_null(i32 %x) { +define i1 @test_memchr_null(i32 %x) !prof !0 { ; CHECK-LABEL: define i1 @test_memchr_null( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0:![0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -40,9 +41,10 @@ entry: ret i1 %isnull } -define ptr @test_memchr(i32 %x) { +define ptr @test_memchr(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -72,16 +74,17 @@ entry: ret ptr %memchr } -define ptr @test_memchr_smaller_n(i32 %x) { +define ptr @test_memchr_smaller_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_smaller_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ ; CHECK-NEXT: i8 48, label %[[MEMCHR_CASE:.*]] ; CHECK-NEXT: i8 49, label %[[MEMCHR_CASE1:.*]] ; CHECK-NEXT: i8 0, label %[[MEMCHR_CASE2:.*]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF_1:![0-9]+]] ; CHECK: [[MEMCHR_CASE]]: ; CHECK-NEXT: br label %[[MEMCHR_SUCCESS:.*]] ; CHECK: [[MEMCHR_CASE1]]: @@ -103,9 +106,10 @@ entry: ; negative tests -define ptr @test_memchr_larger_n(i32 %x) { +define ptr @test_memchr_larger_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_larger_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 6) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -115,9 +119,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_non_constant(i32 %x, ptr %str) { +define ptr @test_memchr_non_constant(i32 %x, ptr %str) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant( -; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr [[STR]], i32 [[X]], i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -127,8 +132,9 @@ entry: ret ptr %memchr } -define ptr @test_memchr_constant_ch() { -; CHECK-LABEL: define ptr @test_memchr_constant_ch() { +define ptr @test_memchr_constant_ch() !prof !0 { +; CHECK-LABEL: define ptr @test_memchr_constant_ch() +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 49, i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -138,9 +144,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) { +define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_dynamic_n( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 
[[X]], i32 [[Y]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -150,9 +157,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_long(i32 %x) { +define ptr @test_memchr_long(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_long( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str_long, i32 [[X]], i64 8) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -163,9 +171,10 @@ entry: } ; We want to check that the compiler still calls memchr if the length is non-constant: -define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) { +define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant_length2( -; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 [[LEN]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -174,3 +183,7 @@ entry: %memchr = call ptr @memchr(ptr @str, i32 %x, i64 %len) ret ptr %memchr } + +!0 = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"}
\ No newline at end of file diff --git a/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll b/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll new file mode 100644 index 0000000..41753f7 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/cleanup-runtime-checks.ll @@ -0,0 +1,79 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -p loop-vectorize -S %s | FileCheck %s + +target triple = "x86_64-unknown-linux-gnu" + +declare ptr @get() +declare i1 @cond() + +; Make sure we can clean up the created runtime checks, if vectorization isn't +; profitable. +define void @widget(i32 %arg, i64 %arg1, ptr %src) #0 { +; CHECK-LABEL: define void @widget( +; CHECK-SAME: i32 [[ARG:%.*]], i64 [[ARG1:%.*]], ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[LOOP_1_HEADER:.*]] +; CHECK: [[LOOP_1_HEADER]]: +; CHECK-NEXT: br label %[[INNER_1:.*]] +; CHECK: [[INNER_1]]: +; CHECK-NEXT: [[C_1:%.*]] = call i1 @cond() +; CHECK-NEXT: br i1 [[C_1]], label %[[INNER_2:.*]], label %[[INNER_1]] +; CHECK: [[INNER_2]]: +; CHECK-NEXT: [[LOAD:%.*]] = call ptr @get() +; CHECK-NEXT: [[C_2:%.*]] = call i1 @cond() +; CHECK-NEXT: br i1 [[C_2]], label %[[LOOP_2_PREHEADER:.*]], label %[[LOOP_1_LATCH:.*]] +; CHECK: [[LOOP_2_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_2:.*]] +; CHECK: [[LOOP_1_LATCH]]: +; CHECK-NEXT: br label %[[LOOP_1_HEADER]] +; CHECK: [[LOOP_2]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP_2]] ], [ [[ARG]], %[[LOOP_2_PREHEADER]] ] +; CHECK-NEXT: [[PHI8:%.*]] = phi i32 [ [[OR:%.*]], %[[LOOP_2]] ], [ 99, %[[LOOP_2_PREHEADER]] ] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i32 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 +; CHECK-NEXT: [[OR]] = or i32 [[PHI8]], [[L]] +; CHECK-NEXT: store i32 [[OR]], ptr [[LOAD]], align 4 +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 100 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_2]], !prof [[PROF0:![0-9]+]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.1.header + +loop.1.header: + br label %inner.1 + +inner.1: + %c.1 = call i1 @cond() + br i1 %c.1, label %inner.2, label %inner.1 + +inner.2: + %load = call ptr @get() + %c.2 = call i1 @cond() + br i1 %c.2, label %loop.2, label %loop.1.latch + +loop.1.latch: + br label %loop.1.header + +loop.2: + %iv = phi i32 [ %arg, %inner.2 ], [ %iv.next, %loop.2 ] + %phi8 = phi i32 [ 99, %inner.2 ], [ %or, %loop.2 ] + %gep.src = getelementptr i32, ptr %src, i32 %iv + %l = load i32, ptr %gep.src, align 4 + %or = or i32 %phi8, %l + store i32 %or, ptr %load, align 4 + %iv.next = add i32 %iv, 1 + %ec = icmp eq i32 %iv, 100 + br i1 %ec, label %exit, label %loop.2, !prof !0 + +exit: + ret void +} + +attributes #0 = { "target-features"="+avx2" } +!0 = !{!"branch_weights", i32 89478484, i32 1879048192} +;. +; CHECK: [[PROF0]] = !{!"branch_weights", i32 89478484, i32 1879048192} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/pr45259.ll b/llvm/test/Transforms/LoopVectorize/pr45259.ll index fade726..f33437f 100644 --- a/llvm/test/Transforms/LoopVectorize/pr45259.ll +++ b/llvm/test/Transforms/LoopVectorize/pr45259.ll @@ -10,16 +10,15 @@ define i8 @widget(ptr %arr, i8 %t9) { ; CHECK-NEXT: br label [[BB6:%.*]] ; CHECK: bb6: ; CHECK-NEXT: [[T1_0:%.*]] = phi ptr [ [[ARR]], [[BB:%.*]] ], [ null, [[BB6]] ] +; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0]] to i64 ; CHECK-NEXT: [[C:%.*]] = call i1 @cond() ; CHECK-NEXT: br i1 [[C]], label [[FOR_PREHEADER:%.*]], label [[BB6]] ; CHECK: for.preheader: -; CHECK-NEXT: [[T1_0_LCSSA:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[T1_0_LCSSA4:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] ; CHECK-NEXT: [[T1_0_LCSSA1:%.*]] = phi ptr [ [[T1_0]], [[BB6]] ] -; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA]] to i64 -; CHECK-NEXT: [[T1_0_LCSSA2:%.*]] = ptrtoint ptr [[T1_0_LCSSA4]] to i64 ; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[ARR1]] to i32 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] +; CHECK-NEXT: [[T1_0_LCSSA3:%.*]] = ptrtoint ptr [[T1_0_LCSSA4]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[T1_0_LCSSA3]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP3]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll index 047d36b..b9cb1cb 100644 --- a/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll +++ b/llvm/test/Transforms/LoopVectorize/pr47343-expander-lcssa-after-cfg-update.ll @@ -28,18 +28,15 @@ define void @f() { ; CHECK: outer.latch: ; CHECK-NEXT: br label [[OUTER_HEADER]] ; CHECK: outer.exit.0: -; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi ptr [ [[TMP0]], [[OUTER_HEADER]] ] ; CHECK-NEXT: br label [[LOOP_PREHEADER:%.*]] ; CHECK: outer.exit.1: -; CHECK-NEXT: [[DOTLCSSA1:%.*]] = phi ptr [ [[TMP0]], [[INNER_1_LATCH]] ] ; CHECK-NEXT: br label [[LOOP_PREHEADER]] ; CHECK: loop.preheader: -; CHECK-NEXT: [[TMP1:%.*]] = phi ptr [ [[DOTLCSSA]], [[OUTER_EXIT_0]] ], [ [[DOTLCSSA1]], [[OUTER_EXIT_1]] ] ; CHECK-NEXT: br label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP1]], i64 1 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 1 ; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr @f.e, [[SCEVGEP]] -; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP1]], getelementptr inbounds nuw (i8, ptr @f.e, i64 4) +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[TMP0]], getelementptr inbounds nuw (i8, ptr @f.e, i64 4) ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: @@ -59,7 +56,7 @@ define void @f() { ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ] ; CHECK-NEXT: [[CONV6_US_US_US:%.*]] = zext i1 false to i32 ; CHECK-NEXT: store i32 [[CONV6_US_US_US]], ptr @f.e, align 1 -; CHECK-NEXT: store i8 10, ptr [[TMP1]], align 1 +; CHECK-NEXT: store i8 10, ptr [[TMP0]], align 1 ; CHECK-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], 500 ; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll 
b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll index 73d5e26..5894c3a 100644 --- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll @@ -109,14 +109,13 @@ define void @runtime_checks_ptr_inductions(ptr %dst.1, ptr %dst.2, i1 %c) { ; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ [[DST_1]], %[[ENTRY]] ], [ [[PTR_IV_1_NEXT:%.*]], %[[LOOP_1]] ] ; CHECK-NEXT: [[CALL:%.*]] = call i32 @val() ; CHECK-NEXT: [[SEL_DST:%.*]] = select i1 [[C]], ptr [[DST_1]], ptr [[DST_2]] +; CHECK-NEXT: [[SEL_DST_LCSSA12:%.*]] = ptrtoint ptr [[SEL_DST]] to i64 ; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr i8, ptr [[PTR_IV_1]], i64 1 ; CHECK-NEXT: [[EC_1:%.*]] = icmp eq i32 [[CALL]], 0 ; CHECK-NEXT: br i1 [[EC_1]], label %[[LOOP_2_HEADER_PREHEADER:.*]], label %[[LOOP_1]] ; CHECK: [[LOOP_2_HEADER_PREHEADER]]: -; CHECK-NEXT: [[SEL_DST_LCSSA1:%.*]] = phi ptr [ [[SEL_DST]], %[[LOOP_1]] ] ; CHECK-NEXT: [[PTR_IV_1_LCSSA:%.*]] = phi ptr [ [[PTR_IV_1]], %[[LOOP_1]] ] ; CHECK-NEXT: [[SEL_DST_LCSSA:%.*]] = phi ptr [ [[SEL_DST]], %[[LOOP_1]] ] -; CHECK-NEXT: [[SEL_DST_LCSSA12:%.*]] = ptrtoint ptr [[SEL_DST_LCSSA1]] to i64 ; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] ; CHECK: [[VECTOR_MEMCHECK]]: ; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[PTR_IV_1_LCSSA]] to i64 @@ -140,13 +139,13 @@ define void @runtime_checks_ptr_inductions(ptr %dst.1, ptr %dst.2, i1 %c) { ; CHECK-NEXT: br label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 1023, %[[MIDDLE_BLOCK]] ], [ 1, %[[VECTOR_MEMCHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_LCSSA]], %[[VECTOR_MEMCHECK]] ] -; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[SEL_DST_LCSSA]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_LCSSA]], %[[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[SEL_DST_LCSSA]], %[[VECTOR_MEMCHECK]] ] ; CHECK-NEXT: br label %[[LOOP_2_HEADER:.*]] ; CHECK: [[LOOP_2_HEADER]]: ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[DEC7:%.*]], %[[LOOP_2_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL4]], %[[SCALAR_PH]] ] -; CHECK-NEXT: [[PTR_IV_3:%.*]] = phi ptr [ [[PTR_IV_3_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL5]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ] +; CHECK-NEXT: [[PTR_IV_3:%.*]] = phi ptr [ [[PTR_IV_3_NEXT:%.*]], %[[LOOP_2_LATCH]] ], [ [[BC_RESUME_VAL4]], %[[SCALAR_PH]] ] ; CHECK-NEXT: [[EC_2:%.*]] = icmp eq i32 [[IV]], 1024 ; CHECK-NEXT: br i1 [[EC_2]], label %[[EXIT:.*]], label %[[LOOP_2_LATCH]] ; CHECK: [[LOOP_2_LATCH]]: diff --git a/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll new file mode 100644 index 0000000..826696f --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/reverse-induction-gep-nowrap-flags.ll @@ -0,0 +1,182 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s + +define i32 @preserve_inbounds(i64 %start, ptr %ptr) { +; 
CHECK-LABEL: define i32 @preserve_inbounds( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 -3 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[SCALAR_PH:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 +; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] +; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 +; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] +; CHECK: [[END]]: +; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr inbounds i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} + +define i32 @preserve_nusw(i64 %start, ptr %ptr) { +; CHECK-LABEL: define i32 @preserve_nusw( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ 
zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nusw i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr nusw i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr nusw i32, ptr [[TMP2]], i32 -3 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[SCALAR_PH:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 +; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nusw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] +; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 +; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] +; CHECK: [[END]]: +; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr nusw i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} + +define i32 @drop_nuw(i64 %start, ptr %ptr) { +; CHECK-LABEL: define i32 @drop_nuw( +; CHECK-SAME: i64 [[START:%.*]], ptr [[PTR:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[START]], [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[TMP2]], i32 -3 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = 
load <4 x i32>, ptr [[TMP3]], align 4 +; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[TMP4]] = add <4 x i32> [[REVERSE]], [[VEC_PHI]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[END:.*]] +; CHECK: [[SCALAR_PH:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND:%.*]] = phi i64 [ [[START]], %[[SCALAR_PH]] ], [ [[REV_IND_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REDUX:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[REDUX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[REV_IND_NEXT]] = add i64 [[REV_IND]], -1 +; CHECK-NEXT: [[GEP_PTR_IND:%.*]] = getelementptr nuw i32, ptr [[PTR]], i64 [[REV_IND_NEXT]] +; CHECK-NEXT: [[LD_PTR:%.*]] = load i32, ptr [[GEP_PTR_IND]], align 4 +; CHECK-NEXT: [[REDUX_NEXT]] = add i32 [[LD_PTR]], [[REDUX]] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp ne i32 [[IV_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[LOOP]], label %[[END]] +; CHECK: [[END]]: +; CHECK-NEXT: [[REDUX_NEXT_LCSSA:%.*]] = phi i32 [ [[REDUX_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ] +; CHECK-NEXT: ret i32 [[REDUX_NEXT_LCSSA]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %rev.ind = phi i64 [ %start, %entry ], [ %rev.ind.next, %loop ] + %redux = phi i32 [ 0, %entry ], [ %redux.next, %loop ] + %rev.ind.next = add i64 %rev.ind, -1 + %gep.ptr.ind = getelementptr nuw i32, ptr %ptr, i64 %rev.ind.next + %ld.ptr = load i32, ptr %gep.ptr.ind, align 4 + %redux.next = add i32 %ld.ptr, %redux + %iv.next = add i32 %iv, 1 + %exit.cond = icmp ne i32 %iv.next, 1024 + br i1 %exit.cond, label %loop, label %end + +end: + ret i32 %redux.next +} diff --git a/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll b/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll index 9c14a8c..1e4598e 100644 --- a/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/skeleton-lcssa-crash.ll @@ -23,18 +23,16 @@ define i16 @test(ptr %arg, i64 %N) { ; CHECK-NEXT: [[C_3:%.*]] = call i1 @cond() ; CHECK-NEXT: br i1 [[C_3]], label [[LOOP_3_PREHEADER:%.*]], label [[INNER_LATCH:%.*]] ; CHECK: loop.3.preheader: -; CHECK-NEXT: [[L_1_LCSSA:%.*]] = phi ptr [ [[L_1]], [[INNER_BB]] ] -; CHECK-NEXT: [[L_2_LCSSA:%.*]] = phi ptr [ [[L_2]], [[INNER_BB]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[L_2_LCSSA]], i64 2 -; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[L_1_LCSSA]], i64 2 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[L_2]], i64 2 +; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[L_1]], i64 2 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[N]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 4 -; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[L_1_LCSSA]], i64 [[TMP2]] -; 
CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[L_2_LCSSA]], [[SCEVGEP6]] +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[L_1]], i64 [[TMP2]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[L_2]], [[SCEVGEP3]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP5]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] @@ -67,19 +65,17 @@ define i16 @test(ptr %arg, i64 %N) { ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_3]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[C_5:%.*]] = icmp ult i64 [[IV]], [[N]] -; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i16, ptr [[L_1_LCSSA]], i64 [[IV_NEXT]] +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr inbounds i16, ptr [[L_1]], i64 [[IV_NEXT]] ; CHECK-NEXT: [[LOOP_L_1:%.*]] = load i16, ptr [[GEP_1]], align 2 -; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i16, ptr [[L_2_LCSSA]], i64 0 +; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr inbounds i16, ptr [[L_2]], i64 0 ; CHECK-NEXT: store i16 [[LOOP_L_1]], ptr [[GEP_2]], align 2 ; CHECK-NEXT: br i1 [[C_5]], label [[LOOP_3]], label [[EXIT_LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit.loopexit: ; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: exit.loopexit1: -; CHECK-NEXT: [[L_1_LCSSA3:%.*]] = phi ptr [ [[L_1]], [[INNER_LATCH]] ] ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: -; CHECK-NEXT: [[L_14:%.*]] = phi ptr [ [[L_1_LCSSA3]], [[EXIT_LOOPEXIT1]] ], [ [[L_1_LCSSA]], [[EXIT_LOOPEXIT]] ] -; CHECK-NEXT: [[L_3:%.*]] = load i16, ptr [[L_14]], align 2 +; CHECK-NEXT: [[L_3:%.*]] = load i16, ptr [[L_1]], align 2 ; CHECK-NEXT: ret i16 [[L_3]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll index 4e6ef0d..5a0c69b 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll @@ -580,6 +580,127 @@ exit: ret i32 %add } +define i32 @print_mulacc_negated(ptr %a, ptr %b) { +; CHECK-LABEL: 'print_mulacc_negated' +; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF +; CHECK-NEXT: Live-in vp<%1> = VF * UF +; CHECK-NEXT: Live-in vp<%2> = vector-trip-count +; CHECK-NEXT: Live-in ir<1024> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): scalar.ph, vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: EMIT vp<%3> = reduction-start-vector ir<0>, ir<0>, ir<1> +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: <x1> vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%4> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<%3>, vp<%8> +; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<1>, vp<%0> +; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%5> +; CHECK-NEXT: vp<%6> = vector-pointer ir<%gep.a> +; CHECK-NEXT: WIDEN ir<%load.a> = load vp<%6> +; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%5> +; CHECK-NEXT: vp<%7> = vector-pointer ir<%gep.b> +; CHECK-NEXT: WIDEN ir<%load.b> = load vp<%7> +; CHECK-NEXT: EXPRESSION vp<%8> = ir<%accum> + reduce.add (sub (0, mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32))) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%4>, vp<%1> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2> +; CHECK-NEXT: No 
successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): middle.block +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<%10> = compute-reduction-result ir<%accum>, vp<%8> +; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<1024>, vp<%2> +; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> +; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<%10> from middle.block) +; CHECK-NEXT: No successors +; CHECK-EMPTY: +; CHECK-NEXT: scalar.ph: +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%2>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%10>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: Successor(s): ir-bb<loop> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<loop>: +; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph) +; CHECK-NEXT: IR %accum = phi i32 [ 0, %entry ], [ %add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph) +; CHECK-NEXT: IR %gep.a = getelementptr i8, ptr %a, i64 %iv +; CHECK-NEXT: IR %load.a = load i8, ptr %gep.a, align 1 +; CHECK-NEXT: IR %ext.a = zext i8 %load.a to i32 +; CHECK-NEXT: IR %gep.b = getelementptr i8, ptr %b, i64 %iv +; CHECK-NEXT: IR %load.b = load i8, ptr %gep.b, align 1 +; CHECK-NEXT: IR %ext.b = zext i8 %load.b to i32 +; CHECK-NEXT: IR %mul = mul i32 %ext.b, %ext.a +; CHECK-NEXT: IR %sub = sub i32 0, %mul +; CHECK-NEXT: IR %add = add i32 %accum, %sub +; CHECK-NEXT: IR %iv.next = add i64 %iv, 1 +; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, 1024 +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' { +; CHECK-NEXT: Live-in ir<1024> = vector-trip-count +; CHECK-NEXT: Live-in ir<1024> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: Successor(s): vector.ph +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector.body +; CHECK-EMPTY: +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ] +; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi ir<0>, ir<%add> +; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%index> +; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a> +; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%index> +; CHECK-NEXT: WIDEN ir<%load.b> = load ir<%gep.b> +; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32 +; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32 +; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a> +; CHECK-NEXT: WIDEN ir<%sub> = sub ir<0>, ir<%mul> +; CHECK-NEXT: REDUCE ir<%add> = ir<%accum> + reduce.add (ir<%sub>) +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4> +; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<1024> +; CHECK-NEXT: Successor(s): middle.block, vector.body +; CHECK-EMPTY: +; CHECK-NEXT: middle.block: +; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add> +; CHECK-NEXT: Successor(s): ir-bb<exit> +; CHECK-EMPTY: +; CHECK-NEXT: ir-bb<exit>: +; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[RED_RESULT]]> from middle.block) +; CHECK-NEXT: No successors +; CHECK-NEXT: } +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %accum = phi i32 [ 0, %entry ], [ %add, %loop ] + %gep.a = getelementptr i8, ptr %a, i64 %iv + %load.a = load i8, ptr %gep.a, align 
1 + %ext.a = zext i8 %load.a to i32 + %gep.b = getelementptr i8, ptr %b, i64 %iv + %load.b = load i8, ptr %gep.b, align 1 + %ext.b = zext i8 %load.b to i32 + %mul = mul i32 %ext.b, %ext.a + %sub = sub i32 0, %mul + %add = add i32 %accum, %sub + %iv.next = add i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, 1024 + br i1 %exitcond.not, label %exit, label %loop + +exit: + ret i32 %add +} + define i64 @print_mulacc_sub_extended(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) { ; CHECK-LABEL: 'print_mulacc_sub_extended' ; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' { diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll index 645dbc4..4f52227 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll @@ -7,8 +7,8 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 -; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -28,22 +28,22 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -62,22 +62,22 @@ define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr 
%gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -87,9 +87,9 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16 +; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> -; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -109,22 +109,22 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, 
align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -144,22 +144,22 @@ define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) { %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 ; NOTE: value from %load1 in stored in %gep_s0 - store i8 %load1, ptr %gep_s0, align 16 - store i8 %load0, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load1, ptr %gep_s0, align 1 + store i8 %load0, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -170,9 +170,9 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison) +; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 1, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> -; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -192,22 +192,22 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - 
%load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -226,22 +226,22 @@ define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load0, ptr %gep_s0, align 16 - store i8 %load1, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 %load14, ptr %gep_s14, align 16 - store i8 %load15, ptr %gep_s15, align 16 + store i8 %load0, ptr %gep_s0, align 1 + store i8 %load1, ptr %gep_s1, align 1 + store i8 %load2, ptr %gep_s2, align 1 + store i8 %load3, ptr %gep_s3, align 1 + store i8 %load4, ptr %gep_s4, align 1 + store i8 %load5, ptr %gep_s5, align 1 + store i8 %load6, ptr %gep_s6, align 1 + store i8 %load7, ptr %gep_s7, align 1 + store i8 %load8, ptr %gep_s8, align 1 + store i8 %load9, ptr %gep_s9, align 1 + store i8 %load10, ptr %gep_s10, align 1 + store i8 %load11, ptr %gep_s11, align 1 + store i8 %load12, ptr %gep_s12, align 1 + store i8 %load13, ptr %gep_s13, align 1 + store i8 %load14, ptr %gep_s14, align 1 + store i8 %load15, ptr %gep_s15, align 1 ret void } @@ -251,10 +251,10 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { ; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] { ; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0 ; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0 -; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 
false, i1 true>, <31 x i8> poison) +; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 1, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 2, i32 0, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30> -; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16 +; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1 ; CHECK-NEXT: ret void ; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0 @@ -274,22 +274,22 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28 %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30 - %load0 = load i8, ptr %gep_l0 , align 16 - %load1 = load i8, ptr %gep_l1 , align 16 - %load2 = load i8, ptr %gep_l2 , align 16 - %load3 = load i8, ptr %gep_l3 , align 16 - %load4 = load i8, ptr %gep_l4 , align 16 - %load5 = load i8, ptr %gep_l5 , align 16 - %load6 = load i8, ptr %gep_l6 , align 16 - %load7 = load i8, ptr %gep_l7 , align 16 - %load8 = load i8, ptr %gep_l8 , align 16 - %load9 = load i8, ptr %gep_l9 , align 16 - %load10 = load i8, ptr %gep_l10, align 16 - %load11 = load i8, ptr %gep_l11, align 16 - %load12 = load i8, ptr %gep_l12, align 16 - %load13 = load i8, ptr %gep_l13, align 16 - %load14 = load i8, ptr %gep_l14, align 16 - %load15 = load i8, ptr %gep_l15, align 16 + %load0 = load i8, ptr %gep_l0 , align 1 + %load1 = load i8, ptr %gep_l1 , align 1 + %load2 = load i8, ptr %gep_l2 , align 1 + %load3 = load i8, ptr %gep_l3 , align 1 + %load4 = load i8, ptr %gep_l4 , align 1 + %load5 = load i8, ptr %gep_l5 , align 1 + %load6 = load i8, ptr %gep_l6 , align 1 + %load7 = load i8, ptr %gep_l7 , align 1 + %load8 = load i8, ptr %gep_l8 , align 1 + %load9 = load i8, ptr %gep_l9 , align 1 + %load10 = load i8, ptr %gep_l10, align 1 + %load11 = load i8, ptr %gep_l11, align 1 + %load12 = load i8, ptr %gep_l12, align 1 + %load13 = load i8, ptr %gep_l13, align 1 + %load14 = load i8, ptr %gep_l14, align 1 + %load15 = load i8, ptr %gep_l15, align 1 %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0 %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1 @@ -308,22 +308,22 @@ define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) { %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14 %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15 - store i8 %load1, ptr %gep_s0, align 16 - store i8 %load0, ptr %gep_s1, align 16 - store i8 %load2, ptr %gep_s2, align 16 - store i8 %load3, ptr %gep_s3, align 16 - store i8 %load4, ptr %gep_s4, align 16 - store i8 %load5, ptr %gep_s5, align 16 - store i8 %load6, ptr %gep_s6, align 16 - store i8 %load7, ptr %gep_s7, align 16 - store i8 %load8, ptr %gep_s8, align 16 - store i8 %load9, ptr %gep_s9, align 16 - store i8 %load10, ptr %gep_s10, align 16 - store i8 %load11, ptr %gep_s11, align 16 - store i8 %load12, ptr %gep_s12, align 16 - store i8 %load13, ptr %gep_s13, align 16 - store i8 
%load14, ptr %gep_s14, align 16
- store i8 %load15, ptr %gep_s15, align 16
+ store i8 %load1, ptr %gep_s0, align 1
+ store i8 %load0, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
ret void
}
@@ -335,8 +335,8 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
-; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 1 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 1
; CHECK-NEXT: ret void
;
%stride0 = mul nsw i64 %stride, 0
@@ -373,22 +373,22 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
%gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
- %load0 = load i8, ptr %gep_l0 , align 16
- %load1 = load i8, ptr %gep_l1 , align 16
- %load2 = load i8, ptr %gep_l2 , align 16
- %load3 = load i8, ptr %gep_l3 , align 16
- %load4 = load i8, ptr %gep_l4 , align 16
- %load5 = load i8, ptr %gep_l5 , align 16
- %load6 = load i8, ptr %gep_l6 , align 16
- %load7 = load i8, ptr %gep_l7 , align 16
- %load8 = load i8, ptr %gep_l8 , align 16
- %load9 = load i8, ptr %gep_l9 , align 16
- %load10 = load i8, ptr %gep_l10, align 16
- %load11 = load i8, ptr %gep_l11, align 16
- %load12 = load i8, ptr %gep_l12, align 16
- %load13 = load i8, ptr %gep_l13, align 16
- %load14 = load i8, ptr %gep_l14, align 16
- %load15 = load i8, ptr %gep_l15, align 16
+ %load0 = load i8, ptr %gep_l0 , align 1
+ %load1 = load i8, ptr %gep_l1 , align 1
+ %load2 = load i8, ptr %gep_l2 , align 1
+ %load3 = load i8, ptr %gep_l3 , align 1
+ %load4 = load i8, ptr %gep_l4 , align 1
+ %load5 = load i8, ptr %gep_l5 , align 1
+ %load6 = load i8, ptr %gep_l6 , align 1
+ %load7 = load i8, ptr %gep_l7 , align 1
+ %load8 = load i8, ptr %gep_l8 , align 1
+ %load9 = load i8, ptr %gep_l9 , align 1
+ %load10 = load i8, ptr %gep_l10, align 1
+ %load11 = load i8, ptr %gep_l11, align 1
+ %load12 = load i8, ptr %gep_l12, align 1
+ %load13 = load i8, ptr %gep_l13, align 1
+ %load14 = load i8, ptr %gep_l14, align 1
+ %load15 = load i8, ptr %gep_l15, align 1
%gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
%gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
@@ -407,22 +407,22 @@ define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
%gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
- store i8 %load0, ptr %gep_s0, align 16
- store i8 %load1, ptr %gep_s1, align 16
- store i8 %load2, ptr %gep_s2, align 16
- store i8 %load3, ptr %gep_s3, align 16
- store i8 %load4, ptr %gep_s4, align 16
- store i8 %load5, ptr %gep_s5, align 16
- store i8 %load6, ptr %gep_s6, align 16
- store i8 %load7, ptr %gep_s7, align 16
- store i8 %load8, ptr %gep_s8, align 16
- store i8 %load9, ptr %gep_s9, align 16
- store i8 %load10, ptr %gep_s10, align 16
- store i8 %load11, ptr %gep_s11, align 16
- store i8 %load12, ptr %gep_s12, align 16
- store i8 %load13, ptr %gep_s13, align 16
- store i8 %load14, ptr %gep_s14, align 16
- store i8 %load15, ptr %gep_s15, align 16
+ store i8 %load0, ptr %gep_s0, align 1
+ store i8 %load1, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
ret void
}
@@ -434,9 +434,9 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 1 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 1
; CHECK-NEXT: ret void
;
%stride0 = mul nsw i64 %stride, 0
@@ -473,22 +473,22 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
%gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
- %load0 = load i8, ptr %gep_l0 , align 16
- %load1 = load i8, ptr %gep_l1 , align 16
- %load2 = load i8, ptr %gep_l2 , align 16
- %load3 = load i8, ptr %gep_l3 , align 16
- %load4 = load i8, ptr %gep_l4 , align 16
- %load5 = load i8, ptr %gep_l5 , align 16
- %load6 = load i8, ptr %gep_l6 , align 16
- %load7 = load i8, ptr %gep_l7 , align 16
- %load8 = load i8, ptr %gep_l8 , align 16
- %load9 = load i8, ptr %gep_l9 , align 16
- %load10 = load i8, ptr %gep_l10, align 16
- %load11 = load i8, ptr %gep_l11, align 16
- %load12 = load i8, ptr %gep_l12, align 16
- %load13 = load i8, ptr %gep_l13, align 16
- %load14 = load i8, ptr %gep_l14, align 16
- %load15 = load i8, ptr %gep_l15, align 16
+ %load0 = load i8, ptr %gep_l0 , align 1
+ %load1 = load i8, ptr %gep_l1 , align 1
+ %load2 = load i8, ptr %gep_l2 , align 1
+ %load3 = load i8, ptr %gep_l3 , align 1
+ %load4 = load i8, ptr %gep_l4 , align 1
+ %load5 = load i8, ptr %gep_l5 , align 1
+ %load6 = load i8, ptr %gep_l6 , align 1
+ %load7 = load i8, ptr %gep_l7 , align 1
+ %load8 = load i8, ptr %gep_l8 , align 1
+ %load9 = load i8, ptr %gep_l9 , align 1
+ %load10 = load i8, ptr %gep_l10, align 1
+ %load11 = load i8, ptr %gep_l11, align 1
+ %load12 = load i8, ptr %gep_l12, align 1
+ %load13 = load i8, ptr %gep_l13, align 1
+ %load14 = load i8, ptr %gep_l14, align 1
+ %load15 = load i8, ptr %gep_l15, align 1
%gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
%gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
@@ -507,22 +507,22 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
%gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
- store i8 %load1, ptr %gep_s0, align 16
- store i8 %load0, ptr %gep_s1, align 16
- store i8 %load2, ptr %gep_s2, align 16
- store i8 %load3, ptr %gep_s3, align 16
- store i8 %load4, ptr %gep_s4, align 16
- store i8 %load5, ptr %gep_s5, align 16
- store i8 %load6, ptr %gep_s6, align 16
- store i8 %load7, ptr %gep_s7, align 16
- store i8 %load8, ptr %gep_s8, align 16
- store i8 %load9, ptr %gep_s9, align 16
- store i8 %load10, ptr %gep_s10, align 16
- store i8 %load11, ptr %gep_s11, align 16
- store i8 %load12, ptr %gep_s12, align 16
- store i8 %load13, ptr %gep_s13, align 16
- store i8 %load14, ptr %gep_s14, align 16
- store i8 %load15, ptr %gep_s15, align 16
+ store i8 %load1, ptr %gep_s0, align 1
+ store i8 %load0, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
ret void
}
@@ -531,9 +531,9 @@ define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
-; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
-; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 1
; ret void
; }
define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
@@ -541,9 +541,9 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 1, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
-; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1
; CHECK-NEXT: ret void
;
%gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
@@ -563,22 +563,22 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
%gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26
%gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27
- %load0 = load i8, ptr %gep_l0 , align 16
- %load1 = load i8, ptr %gep_l1 , align 16
- %load2 = load i8, ptr %gep_l2 , align 16
- %load3 = load i8, ptr %gep_l3 , align 16
- %load4 = load i8, ptr %gep_l4 , align 16
- %load5 = load i8, ptr %gep_l5 , align 16
- %load6 = load i8, ptr %gep_l6 , align 16
- %load7 = load i8, ptr %gep_l7 , align 16
- %load8 = load i8, ptr %gep_l8 , align 16
- %load9 = load i8, ptr %gep_l9 , align 16
- %load10 = load i8, ptr %gep_l10, align 16
- %load11 = load i8, ptr %gep_l11, align 16
- %load12 = load i8, ptr %gep_l12, align 16
- %load13 = load i8, ptr %gep_l13, align 16
- %load14 = load i8, ptr %gep_l14, align 16
- %load15 = load i8, ptr %gep_l15, align 16
+ %load0 = load i8, ptr %gep_l0 , align 1
+ %load1 = load i8, ptr %gep_l1 , align 1
+ %load2 = load i8, ptr %gep_l2 , align 1
+ %load3 = load i8, ptr %gep_l3 , align 1
+ %load4 = load i8, ptr %gep_l4 , align 1
+ %load5 = load i8, ptr %gep_l5 , align 1
+ %load6 = load i8, ptr %gep_l6 , align 1
+ %load7 = load i8, ptr %gep_l7 , align 1
+ %load8 = load i8, ptr %gep_l8 , align 1
+ %load9 = load i8, ptr %gep_l9 , align 1
+ %load10 = load i8, ptr %gep_l10, align 1
+ %load11 = load i8, ptr %gep_l11, align 1
+ %load12 = load i8, ptr %gep_l12, align 1
+ %load13 = load i8, ptr %gep_l13, align 1
+ %load14 = load i8, ptr %gep_l14, align 1
+ %load15 = load i8, ptr %gep_l15, align 1
%gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
%gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
@@ -597,22 +597,22 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
%gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
%gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
- store i8 %load0, ptr %gep_s0, align 16
- store i8 %load1, ptr %gep_s1, align 16
- store i8 %load2, ptr %gep_s2, align 16
- store i8 %load3, ptr %gep_s3, align 16
- store i8 %load4, ptr %gep_s4, align 16
- store i8 %load5, ptr %gep_s5, align 16
- store i8 %load6, ptr %gep_s6, align 16
- store i8 %load7, ptr %gep_s7, align 16
- store i8 %load8, ptr %gep_s8, align 16
- store i8 %load9, ptr %gep_s9, align 16
- store i8 %load10, ptr %gep_s10, align 16
- store i8 %load11, ptr %gep_s11, align 16
- store i8 %load12, ptr %gep_s12, align 16
- store i8 %load13, ptr %gep_s13, align 16
- store i8 %load14, ptr %gep_s14, align 16
- store i8 %load15, ptr %gep_s15, align 16
+ store i8 %load0, ptr %gep_s0, align 1
+ store i8 %load1, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
ret void
}
@@ -621,9 +621,9 @@ define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps)
; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
-; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4)
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 1 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4)
; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
-; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 1
; ret void
; }
define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
@@ -638,10 +638,10 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]]
; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]]
; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
@@ -649,7 +649,7 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
-; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 1
; CHECK-NEXT: ret void
;
%offset0 = mul nsw i64 %stride, 0
@@ -686,22 +686,22 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14
%gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15
- %load0 = load i8, ptr %gep_l0 , align 16
- %load1 = load i8, ptr %gep_l1 , align 16
- %load2 = load i8, ptr %gep_l2 , align 16
- %load3 = load i8, ptr %gep_l3 , align 16
- %load4 = load i8, ptr %gep_l4 , align 16
- %load5 = load i8, ptr %gep_l5 , align 16
- %load6 = load i8, ptr %gep_l6 , align 16
- %load7 = load i8, ptr %gep_l7 , align 16
- %load8 = load i8, ptr %gep_l8 , align 16
- %load9 = load i8, ptr %gep_l9 , align 16
- %load10 = load i8, ptr %gep_l10, align 16
- %load11 = load i8, ptr %gep_l11, align 16
- %load12 = load i8, ptr %gep_l12, align 16
- %load13 = load i8, ptr %gep_l13, align 16
- %load14 = load i8, ptr %gep_l14, align 16
- %load15 = load i8, ptr %gep_l15, align 16
+ %load0 = load i8, ptr %gep_l0 , align 1
+ %load1 = load i8, ptr %gep_l1 , align 1
+ %load2 = load i8, ptr %gep_l2 , align 1
+ %load3 = load i8, ptr %gep_l3 , align 1
+ %load4 = load i8, ptr %gep_l4 , align 1
+ %load5 = load i8, ptr %gep_l5 , align 1
+ %load6 = load i8, ptr %gep_l6 , align 1
+ %load7 = load i8, ptr %gep_l7 , align 1
+ %load8 = load i8, ptr %gep_l8 , align 1
+ %load9 = load i8, ptr %gep_l9 , align 1
+ %load10 = load i8, ptr %gep_l10, align 1
+ %load11 = load i8, ptr %gep_l11, align 1
+ %load12 = load i8, ptr %gep_l12, align 1
+ %load13 = load i8, ptr %gep_l13, align 1
+ %load14 = load i8, ptr %gep_l14, align 1
+ %load15 = load i8, ptr %gep_l15, align 1
%gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
%gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
@@ -720,22 +720,22 @@ define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
%gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
%gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
- store i8 %load0, ptr %gep_s0, align 16
- store i8 %load1, ptr %gep_s1, align 16
- store i8 %load2, ptr %gep_s2, align 16
- store i8 %load3, ptr %gep_s3, align 16
- store i8 %load4, ptr %gep_s4, align 16
- store i8 %load5, ptr %gep_s5, align 16
- store i8 %load6, ptr %gep_s6, align 16
- store i8 %load7, ptr %gep_s7, align 16
- store i8 %load8, ptr %gep_s8, align 16
- store i8 %load9, ptr %gep_s9, align 16
- store i8 %load10, ptr %gep_s10, align 16
- store i8 %load11, ptr %gep_s11, align 16
- store i8 %load12, ptr %gep_s12, align 16
- store i8 %load13, ptr %gep_s13, align 16
- store i8 %load14, ptr %gep_s14, align 16
- store i8 %load15, ptr %gep_s15, align 16
+ store i8 %load0, ptr %gep_s0, align 1
+ store i8 %load1, ptr %gep_s1, align 1
+ store i8 %load2, ptr %gep_s2, align 1
+ store i8 %load3, ptr %gep_s3, align 1
+ store i8 %load4, ptr %gep_s4, align 1
+ store i8 %load5, ptr %gep_s5, align 1
+ store i8 %load6, ptr %gep_s6, align 1
+ store i8 %load7, ptr %gep_s7, align 1
+ store i8 %load8, ptr %gep_s8, align 1
+ store i8 %load9, ptr %gep_s9, align 1
+ store i8 %load10, ptr %gep_s10, align 1
+ store i8 %load11, ptr %gep_s11, align 1
+ store i8 %load12, ptr %gep_s12, align 1
+ store i8 %load13, ptr %gep_s13, align 1
+ store i8 %load14, ptr %gep_s14, align 1
+ store i8 %load15, ptr %gep_s15, align 1
ret void
}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll
new file mode 100644
index 0000000..c4e6c4e
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/test-delete-tree.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s
+
+; CHECK-NOT: TreeEntryToStridedPtrInfoMap is not cleared
+define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 1
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 1
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+ %load0 = load i8, ptr %gep_l0
+ %load1 = load i8, ptr %gep_l1
+ %load2 = load i8, ptr %gep_l2
+ %load3 = load i8, ptr %gep_l3
+ %load4 = load i8, ptr %gep_l4
+ %load5 = load i8, ptr %gep_l5
+ %load6 = load i8, ptr %gep_l6
+ %load7 = load i8, ptr %gep_l7
+ %load8 = load i8, ptr %gep_l8
+ %load9 = load i8, ptr %gep_l9
+ %load10 = load i8, ptr %gep_l10
+ %load11 = load i8, ptr %gep_l11
+ %load12 = load i8, ptr %gep_l12
+ %load13 = load i8, ptr %gep_l13
+ %load14 = load i8, ptr %gep_l14
+ %load15 = load i8, ptr %gep_l15
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0
+ store i8 %load1, ptr %gep_s1
+ store i8 %load2, ptr %gep_s2
+ store i8 %load3, ptr %gep_s3
+ store i8 %load4, ptr %gep_s4
+ store i8 %load5, ptr %gep_s5
+ store i8 %load6, ptr %gep_s6
+ store i8 %load7, ptr %gep_s7
+ store i8 %load8, ptr %gep_s8
+ store i8 %load9, ptr %gep_s9
+ store i8 %load10, ptr %gep_s10
+ store i8 %load11, ptr %gep_s11
+ store i8 %load12, ptr %gep_s12
+ store i8 %load13, ptr %gep_s13
+ store i8 %load14, ptr %gep_s14
+ store i8 %load15, ptr %gep_s15
+
+ ret void
+}
diff --git a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
index 79e72aa..38c624e 100644
--- a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
@@ -357,7 +357,7 @@ define <4 x i32> @or_sext_v4i8_to_v4i32_constant_with_loss(<4 x i8> %a) {
define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_constant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 1, i32 2, i32 3, i32 4>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i16>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw nsw <4 x i32> [[AND_INNER]] to <4 x i16>
; CHECK-NEXT: ret <4 x i16> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i16>
@@ -368,7 +368,7 @@ define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) {
define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_minus_constant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 240, i32 241, i32 242, i32 243>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8>
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i8>
@@ -379,7 +379,7 @@ define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) {
define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_multiconstant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 240, i32 1, i32 242, i32 3>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8>
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i8>
@@ -391,7 +391,7 @@ define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) {
define <4 x i32> @or_zext_nneg_constant(<4 x i16> %a) {
; CHECK-LABEL: @or_zext_nneg_constant(
; CHECK-NEXT: [[OR_INNER:%.*]] = or <4 x i16> [[A:%.*]], <i16 1, i16 2, i16 3, i16 4>
-; CHECK-NEXT: [[OR:%.*]] = zext <4 x i16> [[OR_INNER]] to <4 x i32>
+; CHECK-NEXT: [[OR:%.*]] = zext nneg <4 x i16> [[OR_INNER]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[OR]]
;
%z1 = zext nneg <4 x i16> %a to <4 x i32>