Diffstat (limited to 'llvm/test/Transforms')
56 files changed, 5166 insertions, 605 deletions
diff --git a/llvm/test/Transforms/FunctionAttrs/noalias.ll b/llvm/test/Transforms/FunctionAttrs/noalias.ll new file mode 100644 index 0000000..de8bd9e --- /dev/null +++ b/llvm/test/Transforms/FunctionAttrs/noalias.ll @@ -0,0 +1,245 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=function-attrs < %s | FileCheck %s + +declare noalias ptr @malloc(i64 %size) +declare ptr @not_malloc(i64 %size) +declare void @capture(ptr) + +@g = external global i8 + +define ptr @return_malloc(i64 %size) { +; CHECK-LABEL: define noalias ptr @return_malloc( +; CHECK-SAME: i64 [[SIZE:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr @malloc(i64 %size) + ret ptr %a +} + +define ptr @return_not_malloc(i64 %size) { +; CHECK-LABEL: define ptr @return_not_malloc( +; CHECK-SAME: i64 [[SIZE:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @not_malloc(i64 [[SIZE]]) +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr @not_malloc(i64 %size) + ret ptr %a +} + +define ptr @return_null() { +; CHECK-LABEL: define noalias noundef ptr @return_null( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: ret ptr null +; + ret ptr null +} + +define ptr @return_poison() { +; CHECK-LABEL: define noalias ptr @return_poison( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: ret ptr poison +; + ret ptr poison +} + +define ptr @return_alloca() { +; CHECK-LABEL: define noalias noundef nonnull ptr @return_alloca( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: ret ptr [[A]] +; + %a = alloca i8 + ret ptr %a +} + +; noalias arg does not imply noalias return +define ptr @return_noalias_arg(ptr noalias %arg) { +; CHECK-LABEL: define ptr @return_noalias_arg( +; CHECK-SAME: ptr noalias readnone returned captures(ret: address, provenance) [[ARG:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: ret ptr [[ARG]] +; + ret ptr %arg +} + +define ptr @return_global() { +; CHECK-LABEL: define noundef nonnull ptr @return_global( +; CHECK-SAME: ) #[[ATTR0]] { +; CHECK-NEXT: ret ptr @g +; + ret ptr @g +} + +define ptr @no_return() { +; CHECK-LABEL: define noalias noundef nonnull ptr @no_return( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: unreachable +; + unreachable +} + +define ptr @return_multiple(i1 %c, i64 %size) { +; CHECK-LABEL: define noalias ptr @return_multiple( +; CHECK-SAME: i1 [[C:%.*]], i64 [[SIZE:%.*]]) { +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: ret ptr [[A]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: [[B:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: ret ptr [[B]] +; +br i1 %c, label %if, label %else + +if: + %a = call ptr @malloc(i64 %size) + ret ptr %a + +else: + %b = call ptr @malloc(i64 %size) + ret ptr %b +} + +define ptr @return_select(i1 %c, i64 %size) { +; CHECK-LABEL: define noalias ptr @return_select( +; CHECK-SAME: i1 [[C:%.*]], i64 [[SIZE:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C]], ptr [[A]], ptr null +; CHECK-NEXT: ret ptr [[SEL]] +; + %a = call ptr @malloc(i64 %size) + %sel = select i1 %c, ptr %a, ptr null + ret ptr %sel +} + +define ptr @return_phi(i1 %c, i64 %size) { +; CHECK-LABEL: define noalias ptr @return_phi( +; CHECK-SAME: i1 [[C:%.*]], i64 [[SIZE:%.*]]) { +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 
[[SIZE]]) +; CHECK-NEXT: br label %[[JOIN:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: br label %[[JOIN]] +; CHECK: [[JOIN]]: +; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[A]], %[[IF]] ], [ null, %[[ELSE]] ] +; CHECK-NEXT: ret ptr [[PHI]] +; + br i1 %c, label %if, label %else + +if: + %a = call ptr @malloc(i64 %size) + br label %join + +else: + br label %join + +join: + %phi = phi ptr [ %a, %if ], [ null, %else ] + ret ptr %phi +} + +define ptr @return_phi_wrong(i1 %c, i64 %size) { +; CHECK-LABEL: define ptr @return_phi_wrong( +; CHECK-SAME: i1 [[C:%.*]], i64 [[SIZE:%.*]]) { +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: br label %[[JOIN:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: [[B:%.*]] = call ptr @not_malloc(i64 [[SIZE]]) +; CHECK-NEXT: br label %[[JOIN]] +; CHECK: [[JOIN]]: +; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[A]], %[[IF]] ], [ [[B]], %[[ELSE]] ] +; CHECK-NEXT: ret ptr [[PHI]] +; + br i1 %c, label %if, label %else + +if: + %a = call ptr @malloc(i64 %size) + br label %join + +else: + %b = call ptr @not_malloc(i64 %size) + br label %join + +join: + %phi = phi ptr [ %a, %if ], [ %b, %else ] + ret ptr %phi +} + +define ptr @return_malloc_with_store(i64 %size) { +; CHECK-LABEL: define noalias noundef ptr @return_malloc_with_store( +; CHECK-SAME: i64 [[SIZE:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: store i8 0, ptr [[A]], align 1 +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr @malloc(i64 %size) + store i8 0, ptr %a + ret ptr %a +} + +define ptr @return_malloc_captured(i64 %size) { +; CHECK-LABEL: define ptr @return_malloc_captured( +; CHECK-SAME: i64 [[SIZE:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 [[SIZE]]) +; CHECK-NEXT: call void @capture(ptr [[A]]) +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr @malloc(i64 %size) + call void @capture(ptr %a) + ret ptr %a +} + +define ptr @scc1(i1 %c) { +; CHECK-LABEL: define noalias ptr @scc1( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]] +; CHECK: [[IF]]: +; CHECK-NEXT: [[A:%.*]] = call ptr @malloc(i64 4) +; CHECK-NEXT: ret ptr [[A]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: [[B:%.*]] = call ptr @scc2(i1 [[C]]) +; CHECK-NEXT: ret ptr [[B]] +; + br i1 %c, label %if, label %else + +if: + %a = call ptr @malloc(i64 4) + ret ptr %a + +else: + %b = call ptr @scc2(i1 %c) + ret ptr %b +} + +define ptr @scc2(i1 %c) { +; CHECK-LABEL: define noalias ptr @scc2( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr @scc1(i1 [[C]]) +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr @scc1(i1 %c) + ret ptr %a +} + +define ptr @return_unknown_call(ptr %fn) { +; CHECK-LABEL: define ptr @return_unknown_call( +; CHECK-SAME: ptr readonly captures(none) [[FN:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call ptr [[FN]]() +; CHECK-NEXT: ret ptr [[A]] +; + %a = call ptr %fn() + ret ptr %a +} + +define ptr @return_unknown_noalias_call(ptr %fn) { +; CHECK-LABEL: define noalias ptr @return_unknown_noalias_call( +; CHECK-SAME: ptr readonly captures(none) [[FN:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = call noalias ptr [[FN]]() +; CHECK-NEXT: ret ptr [[A]] +; + %a = call noalias ptr %fn() + ret ptr %a +} diff --git a/llvm/test/Transforms/FunctionAttrs/nofree.ll b/llvm/test/Transforms/FunctionAttrs/nofree.ll index 1671189..89f030d 100644 --- a/llvm/test/Transforms/FunctionAttrs/nofree.ll +++ b/llvm/test/Transforms/FunctionAttrs/nofree.ll @@ -156,6 +156,24 @@ entry: ret void } +define void 
@unknown_call(ptr %fn) { +; CHECK-LABEL: @unknown_call( +; CHECK-NEXT: call void [[FN:%.*]]() +; CHECK-NEXT: ret void +; + call void %fn() + ret void +} + +define void @unknown_nofree_call(ptr %fn) { +; CHECK-LABEL: @unknown_nofree_call( +; CHECK-NEXT: call void [[FN:%.*]]() #[[ATTR5]] +; CHECK-NEXT: ret void +; + call void %fn() nofree + ret void +} + declare void @_ZdaPv(ptr) local_unnamed_addr #4 attributes #0 = { uwtable } diff --git a/llvm/test/Transforms/FunctionAttrs/nonnull.ll b/llvm/test/Transforms/FunctionAttrs/nonnull.ll index 483b560..8df242f 100644 --- a/llvm/test/Transforms/FunctionAttrs/nonnull.ll +++ b/llvm/test/Transforms/FunctionAttrs/nonnull.ll @@ -1396,5 +1396,35 @@ define ptr @pr91177_non_inbounds_gep(ptr nonnull %arg) { ret ptr %res } +define ptr @unknown_func(ptr %fn) { +; FNATTRS-LABEL: define ptr @unknown_func( +; FNATTRS-SAME: ptr readonly captures(none) [[FN:%.*]]) { +; FNATTRS-NEXT: [[RES:%.*]] = call ptr [[FN]]() +; FNATTRS-NEXT: ret ptr [[RES]] +; +; ATTRIBUTOR-LABEL: define ptr @unknown_func( +; ATTRIBUTOR-SAME: ptr nofree nonnull captures(none) [[FN:%.*]]) { +; ATTRIBUTOR-NEXT: [[RES:%.*]] = call ptr [[FN]]() +; ATTRIBUTOR-NEXT: ret ptr [[RES]] +; + %res = call ptr %fn() + ret ptr %res +} + +define ptr @unknown_nonnull_func(ptr %fn) { +; FNATTRS-LABEL: define nonnull ptr @unknown_nonnull_func( +; FNATTRS-SAME: ptr readonly captures(none) [[FN:%.*]]) { +; FNATTRS-NEXT: [[RES:%.*]] = call nonnull ptr [[FN]]() +; FNATTRS-NEXT: ret ptr [[RES]] +; +; ATTRIBUTOR-LABEL: define nonnull ptr @unknown_nonnull_func( +; ATTRIBUTOR-SAME: ptr nofree nonnull captures(none) [[FN:%.*]]) { +; ATTRIBUTOR-NEXT: [[RES:%.*]] = call nonnull ptr [[FN]]() +; ATTRIBUTOR-NEXT: ret ptr [[RES]] +; + %res = call nonnull ptr %fn() + ret ptr %res +} + attributes #0 = { null_pointer_is_valid } attributes #1 = { nounwind willreturn} diff --git a/llvm/test/Transforms/FunctionAttrs/norecurse.ll b/llvm/test/Transforms/FunctionAttrs/norecurse.ll index 7a089f6..5cb8ac0 100644 --- a/llvm/test/Transforms/FunctionAttrs/norecurse.ll +++ b/llvm/test/Transforms/FunctionAttrs/norecurse.ll @@ -241,6 +241,37 @@ define void @r() norecurse { call void @q() ret void } + +define void @unknown_call(ptr %fn) { +; FNATTRS-LABEL: define {{[^@]+}}@unknown_call +; FNATTRS-SAME: (ptr readonly captures(none) [[FN:%.*]]) { +; FNATTRS-NEXT: call void [[FN]]() +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@unknown_call +; ATTRIBUTOR-SAME: (ptr nofree nonnull captures(none) [[FN:%.*]]) { +; ATTRIBUTOR-NEXT: call void [[FN]]() +; ATTRIBUTOR-NEXT: ret void +; + call void %fn() + ret void +} + +define void @unknown_norecurse_call(ptr %fn) { +; FNATTRS-LABEL: define {{[^@]+}}@unknown_norecurse_call +; FNATTRS-SAME: (ptr readonly captures(none) [[FN:%.*]]) { +; FNATTRS-NEXT: call void [[FN]]() #[[ATTR7:[0-9]+]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@unknown_norecurse_call +; ATTRIBUTOR-SAME: (ptr nofree nonnull captures(none) [[FN:%.*]]) { +; ATTRIBUTOR-NEXT: call void [[FN]]() #[[ATTR9:[0-9]+]] +; ATTRIBUTOR-NEXT: ret void +; + call void %fn() norecurse + ret void +} + ;. 
; FNATTRS: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } ; FNATTRS: attributes #[[ATTR1]] = { nofree nosync nounwind memory(none) } @@ -249,6 +280,7 @@ define void @r() norecurse { ; FNATTRS: attributes #[[ATTR4]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) } ; FNATTRS: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) } ; FNATTRS: attributes #[[ATTR6]] = { nofree norecurse nosync memory(none) } +; FNATTRS: attributes #[[ATTR7]] = { norecurse } ;. ; ATTRIBUTOR: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } ; ATTRIBUTOR: attributes #[[ATTR1]] = { nofree nosync nounwind memory(none) } @@ -259,6 +291,7 @@ define void @r() norecurse { ; ATTRIBUTOR: attributes #[[ATTR6]] = { norecurse nosync memory(none) } ; ATTRIBUTOR: attributes #[[ATTR7]] = { nosync } ; ATTRIBUTOR: attributes #[[ATTR8]] = { nofree willreturn } +; ATTRIBUTOR: attributes #[[ATTR9]] = { norecurse } ;. ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; COMMON: {{.*}} diff --git a/llvm/test/Transforms/FunctionAttrs/nounwind.ll b/llvm/test/Transforms/FunctionAttrs/nounwind.ll index afa9ae3..076a7df 100644 --- a/llvm/test/Transforms/FunctionAttrs/nounwind.ll +++ b/llvm/test/Transforms/FunctionAttrs/nounwind.ll @@ -4,10 +4,15 @@ ; TEST 1 define i32 @foo1() { -; COMMON: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none) -; COMMON-LABEL: define {{[^@]+}}@foo1 -; COMMON-SAME: () #[[ATTR0:[0-9]+]] { -; COMMON-NEXT: ret i32 1 +; FNATTRS: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none) +; FNATTRS-LABEL: define {{[^@]+}}@foo1 +; FNATTRS-SAME: () #[[ATTR0:[0-9]+]] { +; FNATTRS-NEXT: ret i32 1 +; +; ATTRIBUTOR: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none) +; ATTRIBUTOR-LABEL: define {{[^@]+}}@foo1 +; ATTRIBUTOR-SAME: () #[[ATTR0:[0-9]+]] { +; ATTRIBUTOR-NEXT: ret i32 1 ; ret i32 1 } @@ -70,14 +75,23 @@ define void @call_non_nounwind(){ ; } define i32 @maybe_throw(i1 zeroext %0) { -; COMMON-LABEL: define {{[^@]+}}@maybe_throw -; COMMON-SAME: (i1 zeroext [[TMP0:%.*]]) { -; COMMON-NEXT: br i1 [[TMP0]], label [[TMP2:%.*]], label [[TMP3:%.*]] -; COMMON: 2: -; COMMON-NEXT: tail call void @__cxa_rethrow() -; COMMON-NEXT: unreachable -; COMMON: 3: -; COMMON-NEXT: ret i32 -1 +; FNATTRS-LABEL: define {{[^@]+}}@maybe_throw +; FNATTRS-SAME: (i1 zeroext [[TMP0:%.*]]) { +; FNATTRS-NEXT: br i1 [[TMP0]], label [[TMP2:%.*]], label [[TMP3:%.*]] +; FNATTRS: 2: +; FNATTRS-NEXT: tail call void @__cxa_rethrow() +; FNATTRS-NEXT: unreachable +; FNATTRS: 3: +; FNATTRS-NEXT: ret i32 -1 +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@maybe_throw +; ATTRIBUTOR-SAME: (i1 zeroext [[TMP0:%.*]]) { +; ATTRIBUTOR-NEXT: br i1 [[TMP0]], label [[TMP2:%.*]], label [[TMP3:%.*]] +; ATTRIBUTOR: 2: +; ATTRIBUTOR-NEXT: tail call void @__cxa_rethrow() +; ATTRIBUTOR-NEXT: unreachable +; ATTRIBUTOR: 3: +; ATTRIBUTOR-NEXT: ret i32 -1 ; br i1 %0, label %2, label %3 @@ -101,18 +115,31 @@ declare void @__cxa_rethrow() ; } define i32 @catch_thing() personality ptr @__gxx_personality_v0 { -; COMMON-LABEL: define {{[^@]+}}@catch_thing() personality ptr @__gxx_personality_v0 { -; COMMON-NEXT: invoke void @__cxa_rethrow() -; COMMON-NEXT: to label [[TMP1:%.*]] unwind label [[TMP2:%.*]] -; COMMON: 1: -; COMMON-NEXT: unreachable -; COMMON: 2: -; 
COMMON-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: catch ptr null -; COMMON-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0 -; COMMON-NEXT: [[TMP5:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[TMP4]]) -; COMMON-NEXT: tail call void @__cxa_end_catch() -; COMMON-NEXT: ret i32 -1 +; FNATTRS-LABEL: define {{[^@]+}}@catch_thing() personality ptr @__gxx_personality_v0 { +; FNATTRS-NEXT: invoke void @__cxa_rethrow() +; FNATTRS-NEXT: to label [[TMP1:%.*]] unwind label [[TMP2:%.*]] +; FNATTRS: 1: +; FNATTRS-NEXT: unreachable +; FNATTRS: 2: +; FNATTRS-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 } +; FNATTRS-NEXT: catch ptr null +; FNATTRS-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0 +; FNATTRS-NEXT: [[TMP5:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[TMP4]]) +; FNATTRS-NEXT: tail call void @__cxa_end_catch() +; FNATTRS-NEXT: ret i32 -1 +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@catch_thing() personality ptr @__gxx_personality_v0 { +; ATTRIBUTOR-NEXT: invoke void @__cxa_rethrow() +; ATTRIBUTOR-NEXT: to label [[TMP1:%.*]] unwind label [[TMP2:%.*]] +; ATTRIBUTOR: 1: +; ATTRIBUTOR-NEXT: unreachable +; ATTRIBUTOR: 2: +; ATTRIBUTOR-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 } +; ATTRIBUTOR-NEXT: catch ptr null +; ATTRIBUTOR-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0 +; ATTRIBUTOR-NEXT: [[TMP5:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[TMP4]]) +; ATTRIBUTOR-NEXT: tail call void @__cxa_end_catch() +; ATTRIBUTOR-NEXT: ret i32 -1 ; invoke void @__cxa_rethrow() #1 to label %1 unwind label %2 @@ -130,9 +157,13 @@ define i32 @catch_thing() personality ptr @__gxx_personality_v0 { } define i32 @catch_thing_user() { -; COMMON-LABEL: define {{[^@]+}}@catch_thing_user() { -; COMMON-NEXT: [[CATCH_THING_CALL:%.*]] = call i32 @catch_thing() -; COMMON-NEXT: ret i32 [[CATCH_THING_CALL]] +; FNATTRS-LABEL: define {{[^@]+}}@catch_thing_user() { +; FNATTRS-NEXT: [[CATCH_THING_CALL:%.*]] = call i32 @catch_thing() +; FNATTRS-NEXT: ret i32 [[CATCH_THING_CALL]] +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@catch_thing_user() { +; ATTRIBUTOR-NEXT: [[CATCH_THING_CALL:%.*]] = call i32 @catch_thing() +; ATTRIBUTOR-NEXT: ret i32 [[CATCH_THING_CALL]] ; %catch_thing_call = call i32 @catch_thing() ret i32 %catch_thing_call @@ -147,10 +178,10 @@ define void @catch_specific_landingpad() personality ptr @__gxx_personality_v0 { ; COMMON-LABEL: define {{[^@]+}}@catch_specific_landingpad ; COMMON-SAME: () #[[ATTR3:[0-9]+]] personality ptr @__gxx_personality_v0 { ; COMMON-NEXT: invoke void @do_throw() -; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] +; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] ; COMMON: lpad: ; COMMON-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: catch ptr @catch_ty +; COMMON-NEXT: catch ptr @catch_ty ; COMMON-NEXT: call void @abort() ; COMMON-NEXT: unreachable ; COMMON: unreachable: @@ -174,10 +205,10 @@ define void @catch_all_landingpad() personality ptr @__gxx_personality_v0 { ; COMMON-LABEL: define {{[^@]+}}@catch_all_landingpad ; COMMON-SAME: () #[[ATTR4:[0-9]+]] personality ptr @__gxx_personality_v0 { ; COMMON-NEXT: invoke void @do_throw() -; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] +; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] ; COMMON: lpad: ; COMMON-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: catch ptr null +; COMMON-NEXT: catch ptr null ; COMMON-NEXT: call void @abort() ; COMMON-NEXT: unreachable ; COMMON: unreachable: @@ -201,10 
+232,10 @@ define void @filter_specific_landingpad() personality ptr @__gxx_personality_v0 ; COMMON-LABEL: define {{[^@]+}}@filter_specific_landingpad ; COMMON-SAME: () #[[ATTR3]] personality ptr @__gxx_personality_v0 { ; COMMON-NEXT: invoke void @do_throw() -; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] +; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] ; COMMON: lpad: ; COMMON-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: filter [1 x ptr] [ptr @catch_ty] +; COMMON-NEXT: filter [1 x ptr] [ptr @catch_ty] ; COMMON-NEXT: call void @abort() ; COMMON-NEXT: unreachable ; COMMON: unreachable: @@ -228,10 +259,10 @@ define void @filter_none_landingpad() personality ptr @__gxx_personality_v0 { ; COMMON-LABEL: define {{[^@]+}}@filter_none_landingpad ; COMMON-SAME: () #[[ATTR4]] personality ptr @__gxx_personality_v0 { ; COMMON-NEXT: invoke void @do_throw() -; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] +; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] ; COMMON: lpad: ; COMMON-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: filter [0 x ptr] zeroinitializer +; COMMON-NEXT: filter [0 x ptr] zeroinitializer ; COMMON-NEXT: call void @abort() ; COMMON-NEXT: unreachable ; COMMON: unreachable: @@ -255,10 +286,10 @@ define void @cleanup_landingpad() personality ptr @__gxx_personality_v0 { ; COMMON-LABEL: define {{[^@]+}}@cleanup_landingpad ; COMMON-SAME: () #[[ATTR3]] personality ptr @__gxx_personality_v0 { ; COMMON-NEXT: invoke void @do_throw() -; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] +; COMMON-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[LPAD:%.*]] ; COMMON: lpad: ; COMMON-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } -; COMMON-NEXT: cleanup +; COMMON-NEXT: cleanup ; COMMON-NEXT: call void @abort() ; COMMON-NEXT: unreachable ; COMMON: unreachable: @@ -282,7 +313,7 @@ define void @cleanuppad() personality ptr @__gxx_personality_v0 { ; FNATTRS-LABEL: define {{[^@]+}}@cleanuppad ; FNATTRS-SAME: () #[[ATTR3]] personality ptr @__gxx_personality_v0 { ; FNATTRS-NEXT: invoke void @do_throw() -; FNATTRS-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CPAD:%.*]] +; FNATTRS-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CPAD:%.*]] ; FNATTRS: cpad: ; FNATTRS-NEXT: [[CP:%.*]] = cleanuppad within none [] ; FNATTRS-NEXT: call void @abort() @@ -294,7 +325,7 @@ define void @cleanuppad() personality ptr @__gxx_personality_v0 { ; ATTRIBUTOR-LABEL: define {{[^@]+}}@cleanuppad ; ATTRIBUTOR-SAME: () #[[ATTR4]] personality ptr @__gxx_personality_v0 { ; ATTRIBUTOR-NEXT: invoke void @do_throw() -; ATTRIBUTOR-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CPAD:%.*]] +; ATTRIBUTOR-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CPAD:%.*]] ; ATTRIBUTOR: cpad: ; ATTRIBUTOR-NEXT: [[CP:%.*]] = cleanuppad within none [] ; ATTRIBUTOR-NEXT: call void @abort() @@ -319,7 +350,7 @@ define void @catchswitch_cleanuppad() personality ptr @__gxx_personality_v0 { ; FNATTRS-LABEL: define {{[^@]+}}@catchswitch_cleanuppad ; FNATTRS-SAME: () #[[ATTR3]] personality ptr @__gxx_personality_v0 { ; FNATTRS-NEXT: invoke void @do_throw() -; FNATTRS-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CS:%.*]] +; FNATTRS-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CS:%.*]] ; FNATTRS: cs: ; FNATTRS-NEXT: [[TOK:%.*]] = catchswitch within none [label %catch] unwind label [[CPAD:%.*]] ; FNATTRS: catch: @@ -337,7 +368,7 @@ define void @catchswitch_cleanuppad() personality ptr @__gxx_personality_v0 { ; 
ATTRIBUTOR-LABEL: define {{[^@]+}}@catchswitch_cleanuppad ; ATTRIBUTOR-SAME: () #[[ATTR4]] personality ptr @__gxx_personality_v0 { ; ATTRIBUTOR-NEXT: invoke void @do_throw() -; ATTRIBUTOR-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CS:%.*]] +; ATTRIBUTOR-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CS:%.*]] ; ATTRIBUTOR: cs: ; ATTRIBUTOR-NEXT: [[TOK:%.*]] = catchswitch within none [label %catch] unwind label [[CPAD:%.*]] ; ATTRIBUTOR: catch: @@ -371,6 +402,38 @@ unreachable: unreachable } +define void @unknown_call(ptr %fn) { +; FNATTRS-LABEL: define {{[^@]+}}@unknown_call +; FNATTRS-SAME: (ptr readonly captures(none) [[FN:%.*]]) { +; FNATTRS-NEXT: call void [[FN]]() +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR-LABEL: define {{[^@]+}}@unknown_call +; ATTRIBUTOR-SAME: (ptr nofree nonnull captures(none) [[FN:%.*]]) { +; ATTRIBUTOR-NEXT: call void [[FN]]() +; ATTRIBUTOR-NEXT: ret void +; + call void %fn() + ret void +} + +define void @unknown_nounwind_call(ptr %fn) { +; FNATTRS: Function Attrs: nounwind +; FNATTRS-LABEL: define {{[^@]+}}@unknown_nounwind_call +; FNATTRS-SAME: (ptr readonly captures(none) [[FN:%.*]]) #[[ATTR2:[0-9]+]] { +; FNATTRS-NEXT: call void [[FN]]() #[[ATTR2]] +; FNATTRS-NEXT: ret void +; +; ATTRIBUTOR: Function Attrs: nounwind +; ATTRIBUTOR-LABEL: define {{[^@]+}}@unknown_nounwind_call +; ATTRIBUTOR-SAME: (ptr nofree nonnull captures(none) [[FN:%.*]]) #[[ATTR2:[0-9]+]] { +; ATTRIBUTOR-NEXT: call void [[FN]]() #[[ATTR2]] +; ATTRIBUTOR-NEXT: ret void +; + call void %fn() nounwind + ret void +} + declare i32 @__gxx_personality_v0(...) declare ptr @__cxa_begin_catch(ptr) diff --git a/llvm/test/Transforms/FunctionAttrs/sendmsg-nocallback.ll b/llvm/test/Transforms/FunctionAttrs/sendmsg-nocallback.ll index 4d5db32..04575e4 100644 --- a/llvm/test/Transforms/FunctionAttrs/sendmsg-nocallback.ll +++ b/llvm/test/Transforms/FunctionAttrs/sendmsg-nocallback.ll @@ -50,10 +50,12 @@ define internal i32 @sendmsg_rtn_is_norecurse() { } define void @user() { -; FNATTRS-LABEL: define void @user() { +; FNATTRS: Function Attrs: norecurse nounwind +; FNATTRS-LABEL: define void @user( +; FNATTRS-SAME: ) #[[ATTR1]] { ; FNATTRS-NEXT: call void @sendmsg_is_norecurse() ; FNATTRS-NEXT: call void @sendmsghalt_is_norecurse() -; FNATTRS-NEXT: call void @sendmsg_rtn_is_norecurse() +; FNATTRS-NEXT: [[TMP1:%.*]] = call i32 @sendmsg_rtn_is_norecurse() ; FNATTRS-NEXT: ret void ; ; ATTRIBUTOR: Function Attrs: norecurse nounwind @@ -61,12 +63,12 @@ define void @user() { ; ATTRIBUTOR-SAME: ) #[[ATTR1]] { ; ATTRIBUTOR-NEXT: call void @sendmsg_is_norecurse() #[[ATTR5:[0-9]+]] ; ATTRIBUTOR-NEXT: call void @sendmsghalt_is_norecurse() #[[ATTR6:[0-9]+]] -; ATTRIBUTOR-NEXT: call void @sendmsg_rtn_is_norecurse() #[[ATTR6]] +; ATTRIBUTOR-NEXT: [[TMP1:%.*]] = call i32 @sendmsg_rtn_is_norecurse() #[[ATTR6]] ; ATTRIBUTOR-NEXT: ret void ; call void @sendmsg_is_norecurse() call void @sendmsghalt_is_norecurse() - call void @sendmsg_rtn_is_norecurse() + call i32 @sendmsg_rtn_is_norecurse() ret void } ;. 
diff --git a/llvm/test/Transforms/HipStdPar/math-fixup.ll b/llvm/test/Transforms/HipStdPar/math-fixup.ll new file mode 100644 index 0000000..2c4622c --- /dev/null +++ b/llvm/test/Transforms/HipStdPar/math-fixup.ll @@ -0,0 +1,548 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=hipstdpar-math-fixup %s | FileCheck %s + +define void @test_acos(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_acos( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_acos_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_acos_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.acos.f64(double %dbl) + %1 = call float @llvm.acos.f32(float %flt) + ret void +} + +define void @test_acosh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_acosh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_acosh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_acosh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @acosh(double %dbl) + %1 = call float @acoshf(float %flt) + ret void +} + +define void @test_asin(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_asin( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_asin_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_asin_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.asin.f64(double %dbl) + %1 = call float @llvm.asin.f32(float %flt) + ret void +} + +define void @test_asinh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_asinh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_asinh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_asinh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @asinh(double %dbl) + %1 = call float @asinhf(float %flt) + ret void +} + +define void @test_atan(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_atan( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_atan_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_atan_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.atan.f64(double %dbl) + %1 = call float @llvm.atan.f32(float %flt) + ret void +} + +define void @test_atanh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_atanh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_atanh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_atanh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @atanh(double %dbl) + %1 = call float @atanhf(float %flt) + ret void +} + +define void @test_atan2(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_atan2( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_atan2_f64(double [[DBL]], double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float 
@__hipstdpar_atan2_f32(float [[FLT]], float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.atan2.f64(double %dbl, double %dbl) + %1 = call float @llvm.atan2.f32(float %flt, float %flt) + ret void +} + +define void @test_cbrt(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_cbrt( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_cbrt_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_cbrt_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @cbrt(double %dbl) + %1 = call float @cbrtf(float %flt) + ret void +} + +define void @test_cos(double %dbl) { +; CHECK-LABEL: define void @test_cos( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_cos_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.cos.f64(double %dbl) + ret void +} + +define void @test_cosh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_cosh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_cosh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_cosh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.cosh.f64(double %dbl) + %1 = call float @llvm.cosh.f32(float %flt) + ret void +} + +define void @test_erf(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_erf( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_erf_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_erf_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @erf(double %dbl) + %1 = call float @erff(float %flt) + ret void +} + +define void @test_erfc(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_erfc( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_erfc_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_erfc_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @erfc(double %dbl) + %1 = call float @erfcf(float %flt) + ret void +} + +define void @test_exp(double %dbl) { +; CHECK-LABEL: define void @test_exp( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_exp_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.exp.f64(double %dbl) + ret void +} + +define void @test_exp2(double %dbl) { +; CHECK-LABEL: define void @test_exp2( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_exp2_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.exp2.f64(double %dbl) + ret void +} + +define void @test_expm1(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_expm1( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_expm1_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_expm1_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @expm1(double %dbl) + %1 = call float @expm1f(float %flt) + ret void +} + +define void @test_fdim(double %dbl, float %flt) { +; 
CHECK-LABEL: define void @test_fdim( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_fdim_f64(double [[DBL]], double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_fdim_f32(float [[FLT]], float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @fdim(double %dbl, double %dbl) + %1 = call float @fdimf(float %flt, float %flt) + ret void +} + +define void @test_hypot(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_hypot( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_hypot_f64(double [[DBL]], double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_hypot_f32(float [[FLT]], float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @hypot(double %dbl, double %dbl) + %1 = call float @hypotf(float %flt, float %flt) + ret void +} + +define void @test_lgamma(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_lgamma( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_lgamma_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_lgamma_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @lgamma(double %dbl) + %1 = call float @lgammaf(float %flt) + ret void +} + +define void @test_log(double %dbl) { +; CHECK-LABEL: define void @test_log( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_log_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.log.f64(double %dbl) + ret void +} + +define void @test_log10(double %dbl) { +; CHECK-LABEL: define void @test_log10( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_log10_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.log10.f64(double %dbl) + ret void +} + +define void @test_log2(double %dbl) { +; CHECK-LABEL: define void @test_log2( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_log2_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.log2.f64(double %dbl) + ret void +} + +define void @test_log1p(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_log1p( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_log1p_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_log1p_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @log1p(double %dbl) + %1 = call float @log1pf(float %flt) + ret void +} + +define void @test_modf(double %dbl, float %flt, ptr %pdbl, ptr %pflt) { +; CHECK-LABEL: define void @test_modf( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]], ptr [[PDBL:%.*]], ptr [[PFLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = tail call { double, double } @__hipstdpar_modf_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { double, double } [[TMP0]], 1 +; CHECK-NEXT: store double [[TMP2]], ptr [[PDBL]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = tail call { float, float } @__hipstdpar_modf_f32(float [[FLT]]) +; CHECK-NEXT: [[TMP4:%.*]] = 
extractvalue { float, float } [[TMP3]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { float, float } [[TMP3]], 1 +; CHECK-NEXT: store float [[TMP5]], ptr [[PFLT]], align 4 +; CHECK-NEXT: ret void +; +entry: + %0 = tail call { double, double } @llvm.modf.f64(double %dbl) + %1 = extractvalue { double, double } %0, 0 + %2 = extractvalue { double, double } %0, 1 + store double %2, ptr %pdbl, align 8 + %3 = tail call { float, float } @llvm.modf.f32(float %flt) + %4 = extractvalue { float, float } %3, 0 + %5 = extractvalue { float, float } %3, 1 + store float %5, ptr %pflt, align 4 + ret void +} + +define void @test_pow(double %dbl) { +; CHECK-LABEL: define void @test_pow( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_pow_f64(double [[DBL]], double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.pow.f64(double %dbl, double %dbl) + ret void +} + +define void @test_remainder(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_remainder( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_remainder_f64(double [[DBL]], double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_remainder_f32(float [[FLT]], float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @remainder(double %dbl, double %dbl) + %1 = call float @remainderf(float %flt, float %flt) + ret void +} + +define void @test_remquo(double %dbl, float %flt, ptr %p) { +; CHECK-LABEL: define void @test_remquo( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]], ptr [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_remquo_f64(double [[DBL]], double [[DBL]], ptr [[P]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_remquo_f32(float [[FLT]], float [[FLT]], ptr [[P]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @remquo(double %dbl, double %dbl, ptr %p) + %1 = call float @remquof(float %flt, float %flt, ptr %p) + ret void +} + +define void @test_sin(double %dbl) { +; CHECK-LABEL: define void @test_sin( +; CHECK-SAME: double [[DBL:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_sin_f64(double [[DBL]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.sin.f64(double %dbl) + ret void +} + +define void @test_sinh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_sinh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_sinh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_sinh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.sinh.f64(double %dbl) + %1 = call float @llvm.sinh.f32(float %flt) + ret void +} + +define void @test_tan(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_tan( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_tan_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_tan_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.tan.f64(double %dbl) + %1 = call float @llvm.tan.f32(float %flt) + ret void +} + +define void @test_tanh(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_tanh( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] 
= call double @__hipstdpar_tanh_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_tanh_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @llvm.tanh.f64(double %dbl) + %1 = call float @llvm.tanh.f32(float %flt) + ret void +} + +define void @test_tgamma(double %dbl, float %flt) { +; CHECK-LABEL: define void @test_tgamma( +; CHECK-SAME: double [[DBL:%.*]], float [[FLT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_tgamma_f64(double [[DBL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_tgamma_f32(float [[FLT]]) +; CHECK-NEXT: ret void +; +entry: + %0 = call double @tgamma(double %dbl) + %1 = call float @tgammaf(float %flt) + ret void +} + +@globdbl = global double 4.200000e+01 +@globflt = global float 4.200000e+01 + +define void @global_args() { +; CHECK-LABEL: define void @global_args() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[DBL:%.*]] = load double, ptr @globdbl, align 8 +; CHECK-NEXT: [[FLT:%.*]] = load float, ptr @globflt, align 4 +; CHECK-NEXT: [[TMP0:%.*]] = call double @__hipstdpar_remquo_f64(double [[DBL]], double [[DBL]], ptr @globdbl) +; CHECK-NEXT: [[TMP1:%.*]] = call float @__hipstdpar_remquo_f32(float [[FLT]], float [[FLT]], ptr @globflt) +; CHECK-NEXT: ret void +; +entry: + %dbl = load double, ptr @globdbl + %flt = load float, ptr @globflt + %1 = call double @remquo(double %dbl, double %dbl, ptr @globdbl) + %2 = call float @remquof(float %flt, float %flt, ptr @globflt) + ret void +} + +declare hidden double @remainder(double, double) + +declare hidden float @remainderf(float, float) + +declare hidden double @remquo(double, double, ptr) + +declare hidden float @remquof(float, float, ptr) + +declare hidden double @fdim(double, double) + +declare hidden float @fdimf(float, float) + +declare double @llvm.exp.f64(double) + +declare float @llvm.exp.f32(float) + +declare double @llvm.exp2.f64(double) + +declare float @llvm.exp2.f32(float) + +declare hidden double @expm1(double) + +declare hidden float @expm1f(float) + +declare double @llvm.log.f64(double) + +declare double @llvm.log10.f64(double) + +declare double @llvm.log2.f64(double) + +declare hidden double @log1p(double) + +declare hidden float @log1pf(float) + +declare { float, float } @llvm.modf.f32(float) + +declare { double, double } @llvm.modf.f64(double) + +declare double @llvm.pow.f64(double, double) + +declare hidden double @cbrt(double) + +declare hidden float @cbrtf(float) + +declare hidden double @hypot(double, double) + +declare hidden float @hypotf(float, float) + +declare double @llvm.sin.f64(double) + +declare double @llvm.cos.f64(double) + +declare double @llvm.tan.f64(double) + +declare double @llvm.asin.f64(double) + +declare double @llvm.acos.f64(double) + +declare double @llvm.atan.f64(double) + +declare double @llvm.atan2.f64(double, double) + +declare double @llvm.sinh.f64(double) + +declare double @llvm.cosh.f64(double) + +declare double @llvm.tanh.f64(double) + +declare hidden double @asinh(double) + +declare hidden float @asinhf(float) + +declare hidden double @acosh(double) + +declare hidden float @acoshf(float) + +declare hidden double @atanh(double) + +declare hidden float @atanhf(float) + +declare hidden double @erf(double) + +declare hidden float @erff(float) + +declare hidden double @erfc(double) + +declare hidden float @erfcf(float) + +declare hidden double @tgamma(double) + +declare hidden float @tgammaf(float) + +declare hidden double @lgamma(double) + +declare hidden float @lgammaf(float) 
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll new file mode 100644 index 0000000..c637481 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll @@ -0,0 +1,394 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=instcombine -S < %s | FileCheck %s + +define i8 @simple_recurrence_intrinsic_smax(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_smax( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[SMAX_ACC:%.*]] = phi i8 [ [[SMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[SMAX]] = call i8 @llvm.smax.i8(i8 [[SMAX_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[SMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %smax.acc = phi i8 [ %smax, %loop ], [ %a, %entry ] + %smax = call i8 @llvm.smax.i8(i8 %smax.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %smax +} + +define i8 @simple_recurrence_intrinsic_smin(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_smin( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[SMIN_ACC:%.*]] = phi i8 [ [[SMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[SMIN]] = call i8 @llvm.smin.i8(i8 [[SMIN_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[SMIN]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %smin.acc = phi i8 [ %smin, %loop ], [ %a, %entry ] + %smin = call i8 @llvm.smin.i8(i8 %smin.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %smin +} + +define i8 @simple_recurrence_intrinsic_umax(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_umax( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[UMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %umax.acc = phi i8 [ %umax, %loop ], [ %a, %entry ] + %umax = call i8 @llvm.umax.i8(i8 %umax.acc, i8 %b) + 
%iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %umax +} + +define i8 @simple_recurrence_intrinsic_umin(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_umin( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[UMIN_ACC:%.*]] = phi i8 [ [[UMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[UMIN]] = call i8 @llvm.umin.i8(i8 [[UMIN_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[UMIN]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %umin.acc = phi i8 [ %umin, %loop ], [ %a, %entry ] + %umin = call i8 @llvm.umin.i8(i8 %umin.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %umin +} + +define float @simple_recurrence_intrinsic_maxnum(i32 %n, float %a, float %b) { +; CHECK-LABEL: define float @simple_recurrence_intrinsic_maxnum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX]] = call float @llvm.maxnum.f32(float [[FMAX_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmax.acc = phi float [ %fmax, %loop ], [ %a, %entry ] + %fmax = call float @llvm.maxnum.f32(float %fmax.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmax +} + +define float @simple_recurrence_intrinsic_minnum(i32 %n, float %a, float %b) { +; CHECK-LABEL: define float @simple_recurrence_intrinsic_minnum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN]] = call float @llvm.minnum.f32(float [[FMIN_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMIN]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmin.acc = phi float [ %fmin, %loop ], [ %a, %entry ] + %fmin = call float @llvm.minnum.f32(float %fmin.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmin +} + +define float @simple_recurrence_intrinsic_maximum(i32 
%n, float %a, float %b) { +; CHECK-LABEL: define float @simple_recurrence_intrinsic_maximum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximum.f32(float [[FMAX_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmax.acc = phi float [ %fmax, %loop ], [ %a, %entry ] + %fmax = call nnan float @llvm.maximum.f32(float %fmax.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmax +} + +define float @simple_recurrence_intrinsic_minimum(i32 %n, float %a, float %b) { +; CHECK-LABEL: define float @simple_recurrence_intrinsic_minimum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimum.f32(float [[FMIN_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMIN]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmin.acc = phi float [ %fmin, %loop ], [ %a, %entry ] + %fmin = call nnan float @llvm.minimum.f32(float %fmin.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmin +} + +define float @simple_recurrence_intrinsic_maximumnum(i32 %n, float %a, float %b) { +; CHECK-LABEL: define float @simple_recurrence_intrinsic_maximumnum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximumnum.f32(float [[FMAX_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmax.acc = phi float [ %fmax, %loop ], [ %a, %entry ] + %fmax = call nnan float @llvm.maximumnum.f32(float %fmax.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmax +} + +define float @simple_recurrence_intrinsic_minimumnum(i32 %n, float %a, float %b) { +; CHECK-LABEL: define float 
@simple_recurrence_intrinsic_minimumnum( +; CHECK-SAME: i32 [[N:%.*]], float [[A:%.*]], float [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimumnum.f32(float [[FMIN_ACC]], float [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret float [[FMIN]] +; +entry: + br label %loop + +loop: + %iv = phi i32 [ %iv.next, %loop ], [ 0, %entry ] + %fmin.acc = phi float [ %fmin, %loop ], [ %a, %entry ] + %fmin = call nnan float @llvm.minimumnum.f32(float %fmin.acc, float %b) + %iv.next = add nuw i32 %iv, 1 + %cmp = icmp ult i32 %iv.next, %n + br i1 %cmp, label %loop, label %exit +exit: + ret float %fmin +} + +define i8 @simple_recurrence_intrinsic_multiuse_phi(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_multiuse_phi( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: call void @use(i8 [[UMAX_ACC]]) +; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[UMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %umax.acc = phi i8 [ %umax, %loop ], [ %a, %entry ] + call void @use(i8 %umax.acc) + %umax = call i8 @llvm.umax.i8(i8 %umax.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %umax +} + +; Negative tests. 
+ +define i8 @simple_recurrence_intrinsic_uadd_sat(i8 %n, i8 %a, i8 %b) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_uadd_sat( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]], i8 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[UADD_SAT_ACC:%.*]] = phi i8 [ [[UADD_SAT:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[UADD_SAT]] = call i8 @llvm.uadd.sat.i8(i8 [[UADD_SAT_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[UADD_SAT]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %uadd.sat.acc = phi i8 [ %uadd.sat, %loop ], [ %a, %entry ] + %uadd.sat = call i8 @llvm.uadd.sat.i8(i8 %uadd.sat.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %uadd.sat +} + +define i8 @simple_recurrence_intrinsic_arg_loop_variant(i8 %n, i8 %a) { +; CHECK-LABEL: define i8 @simple_recurrence_intrinsic_arg_loop_variant( +; CHECK-SAME: i8 [[N:%.*]], i8 [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[B:%.*]] = xor i8 [[IV]], 42 +; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret i8 [[UMAX]] +; +entry: + br label %loop + +loop: + %iv = phi i8 [ %iv.next, %loop ], [ 0, %entry ] + %umax.acc = phi i8 [ %umax, %loop ], [ %a, %entry ] + %b = xor i8 %iv, 42 + %umax = call i8 @llvm.umax.i8(i8 %umax.acc, i8 %b) + %iv.next = add nuw i8 %iv, 1 + %cmp = icmp ult i8 %iv.next, %n + br i1 %cmp, label %loop, label %exit + +exit: + ret i8 %umax +} + +declare void @use(i8) diff --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll index 33fa2c3..f83352c 100644 --- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll @@ -959,8 +959,8 @@ define <3 x i31> @wide_splat3(<3 x i33> %x) { define <8 x i8> @wide_lengthening_splat(<4 x i16> %v) { ; CHECK-LABEL: @wide_lengthening_splat( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> [[V:%.*]], <4 x i16> poison, <8 x i32> zeroinitializer -; CHECK-NEXT: [[TR:%.*]] = trunc <8 x i16> [[SHUF]] to <8 x i8> +; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i16> [[V:%.*]] to <4 x i8> +; CHECK-NEXT: [[TR:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x i8> [[TR]] ; %shuf = shufflevector <4 x i16> %v, <4 x i16> %v, <8 x i32> zeroinitializer diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll index a85ce71..dfe9d94 100644 --- a/llvm/test/Transforms/InstCombine/trunc.ll +++ b/llvm/test/Transforms/InstCombine/trunc.ll @@ -960,8 +960,8 @@ define <3 x i31> @wide_splat3(<3 x i33> %x) { define <8 x i8> @wide_lengthening_splat(<4 x i16> %v) { 
; CHECK-LABEL: @wide_lengthening_splat( -; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> [[V:%.*]], <4 x i16> poison, <8 x i32> zeroinitializer -; CHECK-NEXT: [[TR:%.*]] = trunc <8 x i16> [[SHUF]] to <8 x i8> +; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i16> [[V:%.*]] to <4 x i8> +; CHECK-NEXT: [[TR:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: ret <8 x i8> [[TR]] ; %shuf = shufflevector <4 x i16> %v, <4 x i16> %v, <8 x i32> zeroinitializer @@ -969,6 +969,19 @@ define <8 x i8> @wide_lengthening_splat(<4 x i16> %v) { ret <8 x i8> %tr } +; This is a negative test; we expect the trunc to remain after the shuffle as it +; might not be beneficial to perform the trunc on a wider type +define <4 x i8> @wide_shortening_splat(<8 x i16> %v) { +; CHECK-LABEL: @wide_shortening_splat( +; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <8 x i16> [[V:%.*]], <8 x i16> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TR:%.*]] = trunc <4 x i16> [[SHUF]] to <4 x i8> +; CHECK-NEXT: ret <4 x i8> [[TR]] +; + %shuf = shufflevector <8 x i16> %v, <8 x i16> %v, <4 x i32> zeroinitializer + %tr = trunc <4 x i16> %shuf to <4 x i8> + ret <4 x i8> %tr +} + define <2 x i8> @narrow_add_vec_constant(<2 x i32> %x) { ; CHECK-LABEL: @narrow_add_vec_constant( ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8> diff --git a/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll index 672e949..b505917 100644 --- a/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll +++ b/llvm/test/Transforms/InterleavedAccess/RISCV/interleaved-accesses.ll @@ -874,3 +874,79 @@ define void @load_factor2_fp128(ptr %ptr) { %v1 = shufflevector <4 x fp128> %interleaved.vec, <4 x fp128> poison, <2 x i32> <i32 1, i32 3> ret void } + +define void @load_factor2_f32(ptr %ptr) { +; RV32-LABEL: @load_factor2_f32( +; RV32-NEXT: [[TMP1:%.*]] = call { <8 x float>, <8 x float> } @llvm.riscv.seg2.load.mask.v8f32.p0.i32(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i32 8) +; RV32-NEXT: [[TMP2:%.*]] = extractvalue { <8 x float>, <8 x float> } [[TMP1]], 1 +; RV32-NEXT: [[TMP3:%.*]] = extractvalue { <8 x float>, <8 x float> } [[TMP1]], 0 +; RV32-NEXT: ret void +; +; RV64-LABEL: @load_factor2_f32( +; RV64-NEXT: [[TMP1:%.*]] = call { <8 x float>, <8 x float> } @llvm.riscv.seg2.load.mask.v8f32.p0.i64(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i64 8) +; RV64-NEXT: [[TMP2:%.*]] = extractvalue { <8 x float>, <8 x float> } [[TMP1]], 1 +; RV64-NEXT: [[TMP3:%.*]] = extractvalue { <8 x float>, <8 x float> } [[TMP1]], 0 +; RV64-NEXT: ret void +; + %interleaved.vec = load <16 x float>, ptr %ptr + %v0 = shufflevector <16 x float> %interleaved.vec, <16 x float> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %v1 = shufflevector <16 x float> %interleaved.vec, <16 x float> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + ret void +} + +define void @load_factor2_f64(ptr %ptr) { +; RV32-LABEL: @load_factor2_f64( +; RV32-NEXT: [[TMP1:%.*]] = call { <8 x double>, <8 x double> } @llvm.riscv.seg2.load.mask.v8f64.p0.i32(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i32 8) +; RV32-NEXT: [[TMP2:%.*]] = extractvalue { <8 x double>, <8 x double> } [[TMP1]], 1 +; RV32-NEXT: [[TMP3:%.*]] = extractvalue { <8 x double>, <8 x double> } [[TMP1]], 0 +; RV32-NEXT: ret void +; +; RV64-LABEL: @load_factor2_f64( +; RV64-NEXT: [[TMP1:%.*]] = call { <8 x double>, <8 x double> }
@llvm.riscv.seg2.load.mask.v8f64.p0.i64(ptr [[PTR:%.*]], <8 x i1> splat (i1 true), i64 8) +; RV64-NEXT: [[TMP2:%.*]] = extractvalue { <8 x double>, <8 x double> } [[TMP1]], 1 +; RV64-NEXT: [[TMP3:%.*]] = extractvalue { <8 x double>, <8 x double> } [[TMP1]], 0 +; RV64-NEXT: ret void +; + %interleaved.vec = load <16 x double>, ptr %ptr + %v0 = shufflevector <16 x double> %interleaved.vec, <16 x double> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %v1 = shufflevector <16 x double> %interleaved.vec, <16 x double> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + ret void +} + +define void @load_factor2_bf16(ptr %ptr) { +; RV32-LABEL: @load_factor2_bf16( +; RV32-NEXT: [[INTERLEAVED_VEC:%.*]] = load <16 x bfloat>, ptr [[PTR:%.*]], align 32 +; RV32-NEXT: [[V0:%.*]] = shufflevector <16 x bfloat> [[INTERLEAVED_VEC]], <16 x bfloat> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> +; RV32-NEXT: [[V1:%.*]] = shufflevector <16 x bfloat> [[INTERLEAVED_VEC]], <16 x bfloat> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> +; RV32-NEXT: ret void +; +; RV64-LABEL: @load_factor2_bf16( +; RV64-NEXT: [[INTERLEAVED_VEC:%.*]] = load <16 x bfloat>, ptr [[PTR:%.*]], align 32 +; RV64-NEXT: [[V0:%.*]] = shufflevector <16 x bfloat> [[INTERLEAVED_VEC]], <16 x bfloat> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> +; RV64-NEXT: [[V1:%.*]] = shufflevector <16 x bfloat> [[INTERLEAVED_VEC]], <16 x bfloat> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> +; RV64-NEXT: ret void +; + %interleaved.vec = load <16 x bfloat>, ptr %ptr + %v0 = shufflevector <16 x bfloat> %interleaved.vec, <16 x bfloat> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %v1 = shufflevector <16 x bfloat> %interleaved.vec, <16 x bfloat> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + ret void +} + +define void @load_factor2_f16(ptr %ptr) { +; RV32-LABEL: @load_factor2_f16( +; RV32-NEXT: [[INTERLEAVED_VEC:%.*]] = load <16 x half>, ptr [[PTR:%.*]], align 32 +; RV32-NEXT: [[V0:%.*]] = shufflevector <16 x half> [[INTERLEAVED_VEC]], <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> +; RV32-NEXT: [[V1:%.*]] = shufflevector <16 x half> [[INTERLEAVED_VEC]], <16 x half> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> +; RV32-NEXT: ret void +; +; RV64-LABEL: @load_factor2_f16( +; RV64-NEXT: [[INTERLEAVED_VEC:%.*]] = load <16 x half>, ptr [[PTR:%.*]], align 32 +; RV64-NEXT: [[V0:%.*]] = shufflevector <16 x half> [[INTERLEAVED_VEC]], <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> +; RV64-NEXT: [[V1:%.*]] = shufflevector <16 x half> [[INTERLEAVED_VEC]], <16 x half> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> +; RV64-NEXT: ret void +; + %interleaved.vec = load <16 x half>, ptr %ptr + %v0 = shufflevector <16 x half> %interleaved.vec, <16 x half> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> + %v1 = shufflevector <16 x half> %interleaved.vec, <16 x half> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> + ret void +} diff --git a/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll b/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll new file mode 100644 index 0000000..36c6bdd --- /dev/null +++ 
b/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll @@ -0,0 +1,65 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=loop-fusion -S < %s 2>&1 | FileCheck %s +define dso_local i32 @check_sunk_phi_nodes() { +; CHECK-LABEL: define dso_local i32 @check_sunk_phi_nodes() { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[FOR_BODY:.*]] +; CHECK: [[FOR_BODY]]: +; CHECK-NEXT: [[SUM1_02:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_INC6:.*]] ] +; CHECK-NEXT: [[I_01:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_INC6]] ] +; CHECK-NEXT: [[I1_04:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC7:%.*]], %[[FOR_INC6]] ] +; CHECK-NEXT: [[SUM2_03:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD5:%.*]], %[[FOR_INC6]] ] +; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM1_02]], [[I_01]] +; CHECK-NEXT: br label %[[FOR_INC:.*]] +; CHECK: [[FOR_INC]]: +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1_04]], [[I1_04]] +; CHECK-NEXT: [[ADD5]] = add nsw i32 [[SUM2_03]], [[MUL]] +; CHECK-NEXT: br label %[[FOR_INC6]] +; CHECK: [[FOR_INC6]]: +; CHECK-NEXT: [[INC]] = add nsw i32 [[I_01]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 10 +; CHECK-NEXT: [[INC7]] = add nsw i32 [[I1_04]], 1 +; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i32 [[INC7]], 10 +; CHECK-NEXT: br i1 [[CMP3]], label %[[FOR_BODY]], label %[[FOR_END8:.*]] +; CHECK: [[FOR_END8]]: +; CHECK-NEXT: [[SUM2_0_LCSSA:%.*]] = phi i32 [ [[ADD5]], %[[FOR_INC6]] ] +; CHECK-NEXT: [[SUM1_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_INC6]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SUM1_0_LCSSA]], [[SUM2_0_LCSSA]] +; CHECK-NEXT: ret i32 [[TMP0]] +; +entry: + br label %for.body + +for.body: ; preds = %entry, %for.inc + %sum1.02 = phi i32 [ 0, %entry ], [ %add, %for.inc ] + %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.inc ] + %add = add nsw i32 %sum1.02, %i.01 + br label %for.inc + +for.inc: ; preds = %for.body + %inc = add nsw i32 %i.01, 1 + %cmp = icmp slt i32 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.inc + %sum1.0.lcssa = phi i32 [ %add, %for.inc ] + br label %for.body4 + +for.body4: ; preds = %for.end, %for.inc6 + %i1.04 = phi i32 [ 0, %for.end ], [ %inc7, %for.inc6 ] + %sum2.03 = phi i32 [ 0, %for.end ], [ %add5, %for.inc6 ] + %mul = mul nsw i32 %i1.04, %i1.04 + %add5 = add nsw i32 %sum2.03, %mul + br label %for.inc6 + +for.inc6: ; preds = %for.body4 + %inc7 = add nsw i32 %i1.04, 1 + %cmp3 = icmp slt i32 %inc7, 10 + br i1 %cmp3, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.inc6 + %sum2.0.lcssa = phi i32 [ %add5, %for.inc6 ] + %0 = add i32 %sum1.0.lcssa, %sum2.0.lcssa + ret i32 %0 +} + diff --git a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll index 357a7b6..65aaf72 100644 --- a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll @@ -96,3 +96,68 @@ loop.3: exit: ret void } + +declare i1 @cond() + +define ptr @test_lcssa_reuse_preserve_lcssa() { +; CHECK-LABEL: define ptr @test_lcssa_reuse_preserve_lcssa() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[LOOP_0_HEADER:.*]] +; CHECK: [[LOOP_0_HEADER]]: +; CHECK-NEXT: br label %[[LOOP_1:.*]] +; CHECK: [[LOOP_1]]: +; CHECK-NEXT: [[IV_1:%.*]] = phi ptr [ null, %[[LOOP_0_HEADER]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP_1]] ] +; CHECK-NEXT: [[IV_1_NEXT]] = getelementptr i8, ptr [[IV_1]], i64 1 +; CHECK-NEXT: [[EC_1:%.*]] = call i1 
@cond() +; CHECK-NEXT: br i1 [[EC_1]], label %[[THEN:.*]], label %[[LOOP_1]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[IV_1_LCSSA1:%.*]] = phi ptr [ [[IV_1]], %[[LOOP_1]] ] +; CHECK-NEXT: [[C_2:%.*]] = call i1 @cond() +; CHECK-NEXT: br i1 [[C_2]], label %[[LOOP_2_PREHEADER:.*]], label %[[LOOP_0_LATCH:.*]] +; CHECK: [[LOOP_2_PREHEADER]]: +; CHECK-NEXT: [[IV_1_LCSSA:%.*]] = phi ptr [ [[IV_1_LCSSA1]], %[[THEN]] ] +; CHECK-NEXT: [[IV_1_LCSSA_LCSSA:%.*]] = phi ptr [ [[IV_1_LCSSA1]], %[[THEN]] ] +; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr null) +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[IV_1_LCSSA]], i64 1 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[STRLEN]] +; CHECK-NEXT: br label %[[LOOP_2:.*]] +; CHECK: [[LOOP_2]]: +; CHECK-NEXT: [[IV_2:%.*]] = phi ptr [ [[RES:%.*]], %[[LOOP_2]] ], [ [[IV_1_LCSSA_LCSSA]], %[[LOOP_2_PREHEADER]] ] +; CHECK-NEXT: [[RES]] = getelementptr i8, ptr [[IV_2]], i64 1 +; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[IV_1_LCSSA_LCSSA]], align 1 +; CHECK-NEXT: [[EC_2:%.*]] = icmp eq i8 [[L]], 0 +; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[LOOP_2]] +; CHECK: [[LOOP_0_LATCH]]: +; CHECK-NEXT: br label %[[LOOP_0_HEADER]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret ptr [[SCEVGEP]] +; +entry: + br label %loop.0.header + +loop.0.header: + br label %loop.1 + +loop.1: + %iv.1 = phi ptr [ null, %loop.0.header ], [ %iv.1.next, %loop.1 ] + %iv.1.next = getelementptr i8, ptr %iv.1, i64 1 + %ec.1 = call i1 @cond() + br i1 %ec.1, label %then, label %loop.1 + +then: + %c.2 = call i1 @cond() + br i1 %c.2, label %loop.2, label %loop.0.latch + +loop.2: + %iv.2 = phi ptr [ %res, %loop.2 ], [ %iv.1, %then ] + %res = getelementptr i8, ptr %iv.2, i64 1 + %l = load i8, ptr %iv.1, align 1 + %ec.2 = icmp eq i8 %l, 0 + br i1 %ec.2, label %exit, label %loop.2 + +loop.0.latch: + br label %loop.0.header + +exit: + ret ptr %res +} diff --git a/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll new file mode 100644 index 0000000..f74fb14 --- /dev/null +++ b/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll @@ -0,0 +1,108 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -p loop-unroll -S %s | FileCheck %s + +target triple = "hexagon-unknown-linux" + +declare void @foo() + +define void @preserve_lcssa_when_reusing_existing_phi() { +; CHECK-LABEL: define void @preserve_lcssa_when_reusing_existing_phi() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[LOOP_1_HEADER:.*]] +; CHECK: [[LOOP_1_HEADER]]: +; CHECK-NEXT: br label %[[LOOP_2_HEADER:.*]] +; CHECK: [[LOOP_2_HEADER]]: +; CHECK-NEXT: br label %[[LOOP_3:.*]] +; CHECK: [[LOOP_3]]: +; CHECK-NEXT: [[IV_3:%.*]] = phi i32 [ [[IV_3_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[LOOP_2_HEADER]] ] +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: [[IV_3_NEXT]] = add i32 [[IV_3]], 1 +; CHECK-NEXT: br i1 false, label %[[PH:.*]], label %[[LOOP_3]] +; CHECK: [[PH]]: +; CHECK-NEXT: [[IV_3_LCSSA:%.*]] = phi i32 [ [[IV_3]], %[[LOOP_3]] ] +; CHECK-NEXT: br i1 true, label %[[LOOP_2_LATCH:.*]], label %[[LOOP_4_PREHEADER:.*]] +; CHECK: [[LOOP_4_PREHEADER]]: +; CHECK-NEXT: [[IV_3_LCSSA_LCSSA1:%.*]] = phi i32 [ [[IV_3_LCSSA]], %[[PH]] ] +; CHECK-NEXT: [[IV_3_LCSSA_LCSSA:%.*]] = phi i32 [ [[IV_3_LCSSA]], %[[PH]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[IV_3_LCSSA_LCSSA1]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = add 
i32 [[TMP1]], -1 +; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP1]], 7 +; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 7 +; CHECK-NEXT: br i1 [[TMP3]], label %[[LOOP_1_LATCH_UNR_LCSSA:.*]], label %[[LOOP_4_PREHEADER_NEW:.*]] +; CHECK: [[LOOP_4_PREHEADER_NEW]]: +; CHECK-NEXT: br label %[[LOOP_4:.*]] +; CHECK: [[LOOP_2_LATCH]]: +; CHECK-NEXT: br label %[[LOOP_2_HEADER]] +; CHECK: [[LOOP_4]]: +; CHECK-NEXT: [[IV_4:%.*]] = phi i32 [ 0, %[[LOOP_4_PREHEADER_NEW]] ], [ [[INC_I_7:%.*]], %[[LOOP_4]] ] +; CHECK-NEXT: [[NITER:%.*]] = phi i32 [ 0, %[[LOOP_4_PREHEADER_NEW]] ], [ [[NITER_NEXT_7:%.*]], %[[LOOP_4]] ] +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: [[INC_I_7]] = add nuw nsw i32 [[IV_4]], 8 +; CHECK-NEXT: [[NITER_NEXT_7]] = add nuw nsw i32 [[NITER]], 8 +; CHECK-NEXT: br i1 true, label %[[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP_4]] +; CHECK: [[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: [[IV_4_UNR_PH:%.*]] = phi i32 [ [[INC_I_7]], %[[LOOP_4]] ] +; CHECK-NEXT: br label %[[LOOP_1_LATCH_UNR_LCSSA]] +; CHECK: [[LOOP_1_LATCH_UNR_LCSSA]]: +; CHECK-NEXT: [[IV_4_UNR:%.*]] = phi i32 [ 0, %[[LOOP_4_PREHEADER]] ], [ [[IV_4_UNR_PH]], %[[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_4_EPIL_PREHEADER:.*]], label %[[LOOP_1_LATCH:.*]] +; CHECK: [[LOOP_4_EPIL_PREHEADER]]: +; CHECK-NEXT: br label %[[LOOP_4_EPIL:.*]] +; CHECK: [[LOOP_4_EPIL]]: +; CHECK-NEXT: [[IV_4_EPIL:%.*]] = phi i32 [ [[INC_I_EPIL:%.*]], %[[LOOP_4_EPIL]] ], [ [[IV_4_UNR]], %[[LOOP_4_EPIL_PREHEADER]] ] +; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i32 [ 0, %[[LOOP_4_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_4_EPIL]] ] +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: [[INC_I_EPIL]] = add i32 [[IV_4_EPIL]], 1 +; CHECK-NEXT: [[EC_EPIL:%.*]] = icmp eq i32 [[IV_4_EPIL]], [[IV_3_LCSSA_LCSSA]] +; CHECK-NEXT: [[EPIL_ITER_NEXT]] = add i32 [[EPIL_ITER]], 1 +; CHECK-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i32 [[EPIL_ITER_NEXT]], [[XTRAITER]] +; CHECK-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[LOOP_4_EPIL]], label %[[LOOP_1_LATCH_EPILOG_LCSSA:.*]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[LOOP_1_LATCH_EPILOG_LCSSA]]: +; CHECK-NEXT: br label %[[LOOP_1_LATCH]] +; CHECK: [[LOOP_1_LATCH]]: +; CHECK-NEXT: br label %[[LOOP_1_HEADER]] +; +entry: + br label %loop.1.header + +loop.1.header: + br label %loop.2.header + +loop.2.header: + br label %loop.3 + +loop.3: + %iv.3 = phi i32 [ %iv.3.next, %loop.3 ], [ 0, %loop.2.header ] + call void @foo() + %iv.3.next = add i32 %iv.3, 1 + br i1 false, label %ph, label %loop.3 + +ph: + br i1 true, label %loop.2.latch, label %loop.4 + +loop.2.latch: + br label %loop.2.header + +loop.4: + %iv.4 = phi i32 [ 0, %ph ], [ %inc.i, %loop.4 ] + call void @foo() + %inc.i = add i32 %iv.4, 1 + %ec = icmp eq i32 %iv.4, %iv.3 + br i1 %ec, label %loop.1.latch, label %loop.4 + +loop.1.latch: + br label %loop.1.header +} +;. +; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]} +; CHECK: [[META1]] = !{!"llvm.loop.unroll.disable"} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll index 18cc3a8..0f407cd 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll @@ -148,6 +148,104 @@ exit: ret void } +define void @main_vf_vscale_x_2_no_epi_iteration(ptr %A) #0 vscale_range(8, 8) { +; CHECK-LABEL: @main_vf_vscale_x_2_no_epi_iteration( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]] +; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1 +; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; +; CHECK-VF8-LABEL: @main_vf_vscale_x_2_no_epi_iteration( +; CHECK-VF8-NEXT: iter.check: +; CHECK-VF8-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK-VF8: vector.main.loop.iter.check: +; CHECK-VF8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; CHECK-VF8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; CHECK-VF8-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-VF8: vector.ph: +; CHECK-VF8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK-VF8: vector.body: +; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-VF8-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-VF8-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP8:%.*]] = mul nuw i64 
[[TMP7]], 2 +; CHECK-VF8-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP6]], i64 [[TMP8]] +; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP6]], align 1 +; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP9]], align 1 +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-VF8: middle.block: +; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK-VF8: vec.epilog.iter.check: +; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] +; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK-VF8: vec.epilog.ph: +; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; CHECK-VF8: vec.epilog.vector.body: +; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-VF8-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX1]] +; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP11]], align 1 +; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 +; CHECK-VF8-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 +; CHECK-VF8-NEXT: br i1 [[TMP14]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-VF8: vec.epilog.middle.block: +; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK-VF8: vec.epilog.scalar.ph: +; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-VF8-NEXT: br label [[FOR_BODY:%.*]] +; CHECK-VF8: for.body: +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i64, ptr %A, i64 %iv + store i64 1, ptr %arrayidx, align 1 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond = icmp ne i64 %iv.next, 1024 + br i1 %exitcond, label %for.body, label %exit + +exit: + ret void +} ; DEBUG: LV: Checking a loop in 'main_vf_vscale_x_2' ; DEBUG: Create Skeleton for epilogue vectorized loop (first pass) @@ -163,20 +261,21 @@ exit: ; fixed-width VF=8 for the epilogue if the vectors are known to be ; sufficiently wide. This information can be deduced from vscale_range or ; VScaleForTuning (set by mcpu/mtune). 
-define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) { +define void @main_vf_vscale_x_2(ptr %A, i64 %n) #0 vscale_range(8, 8) { ; CHECK-LABEL: @main_vf_vscale_x_2( ; CHECK-NEXT: iter.check: -; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 8 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; CHECK: vector.main.loop.iter.check: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] -; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP1]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -190,44 +289,48 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) { ; CHECK-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]] ; CHECK: vec.epilog.iter.check: -; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] ; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 ; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: -; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[SCALAR_PH]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8 +; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX1]] -; CHECK-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP20]], align 1 -; CHECK-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 -; 
CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT5:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX4]] +; CHECK-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP19]], align 1 +; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX4]], 8 +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT5]], [[N_VEC3]] +; CHECK-NEXT: br i1 [[TMP20]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: vec.epilog.middle.block: -; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] +; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: vec.epilog.scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] -; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[SCALAR_PH]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY1:%.*]] ; CHECK: for.body: ; ; CHECK-VF8-LABEL: @main_vf_vscale_x_2( ; CHECK-VF8-NEXT: iter.check: -; CHECK-VF8-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK-VF8-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N:%.*]], 8 +; CHECK-VF8-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; CHECK-VF8: vector.main.loop.iter.check: ; CHECK-VF8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-VF8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 -; CHECK-VF8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; CHECK-VF8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-VF8-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-VF8: vector.ph: ; CHECK-VF8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] @@ -241,28 +344,31 @@ define void @main_vf_vscale_x_2(ptr %A) #0 vscale_range(8, 8) { ; CHECK-VF8-NEXT: store <vscale x 2 x i64> splat (i64 1), ptr [[TMP17]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-VF8: middle.block: -; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK-VF8: 
vec.epilog.iter.check: -; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 1024, [[N_VEC]] +; CHECK-VF8-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] ; CHECK-VF8-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 ; CHECK-VF8-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK-VF8: vec.epilog.ph: ; CHECK-VF8-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-VF8-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 8 +; CHECK-VF8-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]] ; CHECK-VF8-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK-VF8: vec.epilog.vector.body: ; CHECK-VF8-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT2:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] ; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX1]] ; CHECK-VF8-NEXT: store <8 x i64> splat (i64 1), ptr [[TMP20]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 8 -; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 1024 -; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-VF8-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT2]], [[N_VEC3]] +; CHECK-VF8-NEXT: br i1 [[TMP19]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: -; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK-VF8-NEXT: [[CMP_N6:%.*]] = icmp eq i64 [[N]], [[N_VEC3]] +; CHECK-VF8-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: -; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] ; CHECK-VF8-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-VF8: for.body: ; @@ -274,7 +380,7 @@ for.body: %arrayidx = getelementptr inbounds i64, ptr %A, i64 %iv store i64 1, ptr %arrayidx, align 1 %iv.next = add nuw nsw i64 %iv, 1 - %exitcond = icmp ne i64 %iv.next, 1024 + %exitcond = icmp ne i64 %iv.next, %n br i1 %exitcond, label %for.body, label %exit exit: @@ -313,7 +419,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP19]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -340,7 +446,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-NEXT: store <vscale x 8 x i8> zeroinitializer, ptr [[TMP28]], align 1 ; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX7]], [[TMP26]] ; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]] -; CHECK-NEXT: br i1 [[TMP30]], label 
[[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP30]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: [[CMP_N6:%.*]] = icmp eq i64 10000, [[N_VEC3]] ; CHECK-NEXT: br i1 [[CMP_N6]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -376,7 +482,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP17]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-VF8-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK-VF8: middle.block: ; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 10000, [[N_VEC]] ; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -395,7 +501,7 @@ define void @test_pr57912_pointer_induction(ptr %start) #0 { ; CHECK-VF8-NEXT: store <8 x i8> zeroinitializer, ptr [[TMP20]], align 1 ; CHECK-VF8-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX3]], 8 ; CHECK-VF8-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 10000 -; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-VF8-NEXT: br i1 [[TMP22]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-VF8: vec.epilog.middle.block: ; CHECK-VF8-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK-VF8: vec.epilog.scalar.ph: @@ -420,4 +526,307 @@ exit: ret void } +; Loop with vscale-based trip count vscale x 1033. 
+define void @trip_count_vscale(ptr noalias %a, ptr noalias %b) vscale_range(1, 16) #0 { +; CHECK-LABEL: @trip_count_vscale( +; CHECK-NEXT: iter.check: +; CHECK-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1033 +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP3]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP11]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP16]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD3]] +; CHECK-NEXT: [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] +; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 +; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK: vec.epilog.iter.check: +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] +; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], [[TMP25]] +; CHECK-NEXT: 
br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK: vec.epilog.ph: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2 +; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]] +; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]] +; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2 +; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; CHECK: vec.epilog.vector.body: +; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX7]] +; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 2 x float>, ptr [[TMP30]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX7]] +; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]] +; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr [[TMP32]], align 4 +; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]] +; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]] +; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK: vec.epilog.middle.block: +; CHECK-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC6]] +; CHECK-NEXT: br i1 [[CMP_N11]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK: vec.epilog.scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC6]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; +; CHECK-VF8-LABEL: @trip_count_vscale( +; CHECK-VF8-NEXT: entry: +; CHECK-VF8-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1033 +; CHECK-VF8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-VF8-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-VF8: vector.ph: +; CHECK-VF8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 +; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK-VF8: vector.body: +; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-VF8-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-VF8-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-VF8-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 [[TMP9]] +; CHECK-VF8-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4 +; CHECK-VF8-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP10]], align 4 +; CHECK-VF8-NEXT: [[TMP11:%.*]] = 
getelementptr inbounds nuw float, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-VF8-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4 +; CHECK-VF8-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP14]] +; CHECK-VF8-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; CHECK-VF8-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP15]], align 4 +; CHECK-VF8-NEXT: [[TMP16:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD2]] +; CHECK-VF8-NEXT: [[TMP17:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD3]] +; CHECK-VF8-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]] +; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4 +; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4 +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-VF8: middle.block: +; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-VF8: scalar.ph: +; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-VF8-NEXT: br label [[FOR_BODY:%.*]] +; CHECK-VF8: for.body: +; +entry: + %v = tail call i64 @llvm.vscale.i64() + %n = mul nuw nsw i64 %v, 1033 + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %iv + %l.a = load float, ptr %arrayidx, align 4 + %arrayidx3 = getelementptr inbounds nuw float, ptr %b, i64 %iv + %l.b = load float, ptr %arrayidx3, align 4 + %mul4 = fmul float %l.a, %l.b + store float %mul4, ptr %arrayidx3, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %for.body + +exit: + ret void +} + +; Loop with vscale-based trip count vscale x 1024. +; TODO: No epilogue vectorizations should remain when choosing VF = vscale x 4. 
+define void @trip_count_vscale_no_epilogue_iterations(ptr noalias %a, ptr noalias %b) vscale_range(1, 16) #0 { +; CHECK-LABEL: @trip_count_vscale_no_epilogue_iterations( +; CHECK-NEXT: iter.check: +; CHECK-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1024 +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK: vector.main.loop.iter.check: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], [[TMP3]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP5]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i64 [[TMP11]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP8]], align 4 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP12]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP16]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP17]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD3]] +; CHECK-NEXT: [[TMP19:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4 +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw float, ptr [[TMP13]], i64 [[TMP21]] +; CHECK-NEXT: store <vscale x 4 x float> [[TMP18]], ptr [[TMP13]], align 4 +; CHECK-NEXT: store <vscale x 4 x float> [[TMP19]], ptr [[TMP22]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] +; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; CHECK: vec.epilog.iter.check: +; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] +; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], 2 +; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 
[[N_VEC_REMAINING]], [[TMP25]] +; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; CHECK: vec.epilog.ph: +; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP27:%.*]] = mul nuw i64 [[TMP26]], 2 +; CHECK-NEXT: [[N_MOD_VF5:%.*]] = urem i64 [[N]], [[TMP27]] +; CHECK-NEXT: [[N_VEC6:%.*]] = sub i64 [[N]], [[N_MOD_VF5]] +; CHECK-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP29:%.*]] = mul nuw i64 [[TMP28]], 2 +; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; CHECK: vec.epilog.vector.body: +; CHECK-NEXT: [[INDEX7:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT10:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX7]] +; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 2 x float>, ptr [[TMP30]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX7]] +; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 2 x float>, ptr [[TMP32]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = fmul <vscale x 2 x float> [[WIDE_LOAD8]], [[WIDE_LOAD9]] +; CHECK-NEXT: store <vscale x 2 x float> [[TMP34]], ptr [[TMP32]], align 4 +; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], [[TMP29]] +; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC6]] +; CHECK-NEXT: br i1 [[TMP35]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK: vec.epilog.middle.block: +; CHECK-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC6]] +; CHECK-NEXT: br i1 [[CMP_N11]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; CHECK: vec.epilog.scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC6]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; +; CHECK-VF8-LABEL: @trip_count_vscale_no_epilogue_iterations( +; CHECK-VF8-NEXT: entry: +; CHECK-VF8-NEXT: [[V:%.*]] = tail call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[N:%.*]] = mul nuw nsw i64 [[V]], 1024 +; CHECK-VF8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8 +; CHECK-VF8-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; CHECK-VF8: vector.ph: +; CHECK-VF8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 +; CHECK-VF8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; CHECK-VF8-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; CHECK-VF8-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8 +; CHECK-VF8-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK-VF8: vector.body: +; CHECK-VF8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-VF8-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]] +; CHECK-VF8-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4 +; CHECK-VF8-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[TMP6]], i64 [[TMP9]] +; CHECK-VF8-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4 +; CHECK-VF8-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x 
float>, ptr [[TMP10]], align 4 +; CHECK-VF8-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw float, ptr [[B:%.*]], i64 [[INDEX]] +; CHECK-VF8-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 4 +; CHECK-VF8-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP14]] +; CHECK-VF8-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; CHECK-VF8-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>, ptr [[TMP15]], align 4 +; CHECK-VF8-NEXT: [[TMP16:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD2]] +; CHECK-VF8-NEXT: [[TMP17:%.*]] = fmul <vscale x 4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD3]] +; CHECK-VF8-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-VF8-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; CHECK-VF8-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw float, ptr [[TMP11]], i64 [[TMP19]] +; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP16]], ptr [[TMP11]], align 4 +; CHECK-VF8-NEXT: store <vscale x 4 x float> [[TMP17]], ptr [[TMP20]], align 4 +; CHECK-VF8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-VF8-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-VF8: middle.block: +; CHECK-VF8-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; CHECK-VF8-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-VF8: scalar.ph: +; CHECK-VF8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-VF8-NEXT: br label [[FOR_BODY:%.*]] +; CHECK-VF8: for.body: +; +entry: + %v = tail call i64 @llvm.vscale.i64() + %n = mul nuw nsw i64 %v, 1024 + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds nuw float, ptr %a, i64 %iv + %l.a = load float, ptr %arrayidx, align 4 + %arrayidx3 = getelementptr inbounds nuw float, ptr %b, i64 %iv + %l.b = load float, ptr %arrayidx3, align 4 + %mul4 = fmul float %l.a, %l.b + store float %mul4, ptr %arrayidx3, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %for.body + +exit: + ret void +} + attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll index d5b25bf..21266e5 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN +; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN-PREDICATED ; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { @@ -21,6 +22,24 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; NO-ZVFBFMIN: [[EXIT]]: ; NO-ZVFBFMIN-NEXT: ret void ; +; NO-ZVFBFMIN-PREDICATED-LABEL: define void @fadd( +; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]: +; 
NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]] +; NO-ZVFBFMIN-PREDICATED: [[LOOP]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]] +; NO-ZVFBFMIN-PREDICATED-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] +; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-ZVFBFMIN-PREDICATED: [[EXIT]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: ret void +; ; ZVFBFMIN-LABEL: define void @fadd( ; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; ZVFBFMIN-NEXT: [[ENTRY:.*]]: @@ -133,6 +152,54 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 ; NO-ZVFBFMIN: [[EXIT]]: ; NO-ZVFBFMIN-NEXT: ret void ; +; NO-ZVFBFMIN-PREDICATED-LABEL: define void @vfwmaccbf16.vv( +; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { +; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-ZVFBFMIN-PREDICATED: [[VECTOR_PH]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-ZVFBFMIN-PREDICATED: [[VECTOR_BODY]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <4 x bfloat>, ptr [[A_GEP]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = load <4 x bfloat>, ptr [[B_GEP]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = load <4 x float>, ptr [[C_GEP]], align 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP4:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD]] to <4 x float> +; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP5:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD3]] to <4 x float> +; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x float> [[WIDE_MASKED_LOAD4]]) +; NO-ZVFBFMIN-PREDICATED-NEXT: store <4 x float> [[TMP6]], ptr [[C_GEP]], align 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[I]], 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-ZVFBFMIN-PREDICATED: [[MIDDLE_BLOCK]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[CMP_N:%.*]] = icmp eq i64 
[[N]], [[N_VEC]] +; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-ZVFBFMIN-PREDICATED: [[SCALAR_PH]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]] +; NO-ZVFBFMIN-PREDICATED: [[LOOP]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: [[I1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I1]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP1:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I1]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP1:%.*]] = getelementptr float, ptr [[C]], i64 [[I1]] +; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP1]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP1]], align 2 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP1]], align 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float +; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float +; NO-ZVFBFMIN-PREDICATED-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]]) +; NO-ZVFBFMIN-PREDICATED-NEXT: store float [[FMULADD]], ptr [[C_GEP1]], align 4 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I1]], 1 +; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] +; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-ZVFBFMIN-PREDICATED: [[EXIT]]: +; NO-ZVFBFMIN-PREDICATED-NEXT: ret void +; ; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv( ; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; ZVFBFMIN-NEXT: [[ENTRY:.*]]: @@ -213,6 +280,11 @@ exit: ; NO-ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} ; NO-ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ;. +; NO-ZVFBFMIN-PREDICATED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; NO-ZVFBFMIN-PREDICATED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; NO-ZVFBFMIN-PREDICATED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; NO-ZVFBFMIN-PREDICATED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +;. ; ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 32d17b9..5d032e6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: opt -passes=loop-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; Make sure we do not vectorize a loop with a widened int induction. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll index 5b56552..53e43e1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN +; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN-PREDICATED ; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { @@ -21,6 +22,24 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) { ; NO-ZVFHMIN: [[EXIT]]: ; NO-ZVFHMIN-NEXT: ret void ; +; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd( +; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { +; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]: +; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]] +; NO-ZVFHMIN-PREDICATED: [[LOOP]]: +; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]] +; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]] +; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2 +; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2 +; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]] +; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2 +; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1 +; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]] +; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-ZVFHMIN-PREDICATED: [[EXIT]]: +; NO-ZVFHMIN-PREDICATED-NEXT: ret void +; ; ZVFHMIN-LABEL: define void @fadd( ; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; ZVFHMIN-NEXT: [[ENTRY:.*]]: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index f44cbf2..5b2d308 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP ; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP -; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-OUTLOOP %s -; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s +; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | 
FileCheck --check-prefix=IF-EVL-OUTLOOP %s +; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll index dbe6f27..c35a6ab 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^scalar.ph:" --version 2 ; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue %s 2>&1 | FileCheck %s -check-prefix=SCALAR_EPILOGUE -; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA -; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data-with-evl %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA-WITH-EVL +; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-tail-folding-style=data %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA +; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA-WITH-EVL target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll index 4f3b8d8..0afe04e 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll @@ -1,5 +1,5 @@ -; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: opt -passes=loop-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s ; REQUIRES: asserts diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll b/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll index e4892db..f4817bb 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll @@ -1,6 +1,5 @@ ; RUN: opt -passes=debugify,loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -S < %s 2>&1 | FileCheck --check-prefix=DEBUGLOC %s ; Testing the debug locations of the generated vector intrinsic is same as diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll index 8d8ea5a..f6f8f6a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK +; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK ; Exercise tail folding on RISCV w/scalable vectors. @@ -330,17 +330,44 @@ for.end: define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) { ; CHECK-LABEL: @uniform_load( ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK: vector.body: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]] -; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 +; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; CHECK-NEXT: br label [[FOR_BODY1:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] +; CHECK-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 +; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 
[[IV1]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1025 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: for.end: -; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], [[FOR_BODY]] ] +; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], [[FOR_BODY1]] ], [ [[V]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i64 [[V_LCSSA]] ; entry: @@ -389,7 +416,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[FOR_END:%.*]] ; CHECK: scalar.ph: @@ -403,7 +430,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) { ; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: for.end: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll index f90c450..f9395aa 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-bin-unary-ops-args.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP @@ -69,9 +67,45 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_and( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = 
sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = and <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -79,7 +113,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -160,9 +194,45 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_or( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: 
[[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = or <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -170,7 +240,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -251,9 +321,45 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_xor( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; 
NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -261,7 +367,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -342,9 +448,45 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_shl( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 
[[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = shl <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -352,7 +494,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -433,9 +575,45 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_lshr( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: 
[[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = lshr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -443,7 +621,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -524,9 +702,45 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_ashr( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 
@llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = ashr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -534,7 +748,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -615,9 +829,45 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_add( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: 
[[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -625,7 +875,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -706,9 +956,45 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_sub( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: 
[[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = sub <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -716,7 +1002,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP17:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -797,9 +1083,45 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_mul( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label 
%[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -807,7 +1129,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -888,9 +1210,45 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_sdiv( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label 
%[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -898,7 +1256,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -979,9 +1337,45 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_udiv( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 
[[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = udiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -989,7 +1383,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP23:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1070,9 +1464,45 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_srem( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, 
[[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = srem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -1080,7 +1510,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP25:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1161,9 +1591,45 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_urem( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16 +; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]]) +; NO-VP-NEXT: 
[[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16 +; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[TMP10:%.*]] = urem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1 @@ -1171,7 +1637,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP27:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1255,9 +1721,46 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_fadd( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 
[[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -1265,7 +1768,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1347,9 +1850,46 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_fsub( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; 
NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fsub fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -1357,7 +1897,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1439,9 +1979,46 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_fmul( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fmul fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -1449,7 +2026,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP33:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1531,9 +2108,46 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-LABEL: define void @test_fdiv( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) 
[[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fdiv fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -1541,7 +2155,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP35:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; @@ -1676,9 +2290,46 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly 
%b) { ; NO-VP-LABEL: define void @test_fneg( ; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fneg fast <vscale x 4 x float> [[WIDE_LOAD]] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1 ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]] ; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -1686,7 +2337,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) { ; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]] ; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4 ; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100 -; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP37:![0-9]+]] ; NO-VP: [[FINISH_LOOPEXIT]]: ; NO-VP-NEXT: ret void ; 
@@ -1748,3 +2399,42 @@ finish.loopexit: ; IF-EVL: [[LOOP37]] = distinct !{[[LOOP37]], [[META1]], [[META2]], [[META3]]} ; IF-EVL: [[LOOP38]] = distinct !{[[LOOP38]], [[META1]]} ;. +; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} +; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} +; NO-VP: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} +; NO-VP: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} +; NO-VP: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} +; NO-VP: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]} +; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]} +; NO-VP: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]} +; NO-VP: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]} +; NO-VP: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} +; NO-VP: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]} +; NO-VP: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]} +; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]]} +; NO-VP: [[LOOP22]] = distinct !{[[LOOP22]], [[META1]], [[META2]]} +; NO-VP: [[LOOP23]] = distinct !{[[LOOP23]], [[META1]]} +; NO-VP: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]} +; NO-VP: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]} +; NO-VP: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]], [[META2]]} +; NO-VP: [[LOOP27]] = distinct !{[[LOOP27]], [[META1]]} +; NO-VP: [[LOOP28]] = distinct !{[[LOOP28]], [[META1]], [[META2]]} +; NO-VP: [[LOOP29]] = distinct !{[[LOOP29]], [[META1]]} +; NO-VP: [[LOOP30]] = distinct !{[[LOOP30]], [[META1]], [[META2]]} +; NO-VP: [[LOOP31]] = distinct !{[[LOOP31]], [[META1]]} +; NO-VP: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]], [[META2]]} +; NO-VP: [[LOOP33]] = distinct !{[[LOOP33]], [[META1]]} +; NO-VP: [[LOOP34]] = distinct !{[[LOOP34]], [[META1]], [[META2]]} +; NO-VP: [[LOOP35]] = distinct !{[[LOOP35]], [[META1]]} +; NO-VP: [[LOOP36]] = distinct !{[[LOOP36]], [[META1]], [[META2]]} +; NO-VP: [[LOOP37]] = distinct !{[[LOOP37]], [[META1]]} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll index dfa01a5..7d21c4d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-call-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) { @@ -78,9 +76,53 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-LABEL: define void @vp_smax( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64 +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]] +; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 +; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> 
[[WIDE_LOAD5]]) +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 ; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] @@ -90,7 +132,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -184,9 +226,53 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-LABEL: define void @vp_smin( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64 +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]] +; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, 
%[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 +; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]]) +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 ; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] @@ -196,7 +282,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -290,9 +376,53 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-LABEL: define void @vp_umax( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64 +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]] +; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label 
%[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 +; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]]) +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 ; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] @@ -302,7 +432,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -396,9 +526,53 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-LABEL: define void @vp_umin( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64 +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 
@llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]] +; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]] +; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]] +; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 +; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]]) +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 ; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] @@ -408,7 +582,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) { ; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -494,17 +668,54 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_ctlz( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint 
ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP15]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[TMP1:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 true) -; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[TMP1]], ptr [[GEP3]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ 
-587,17 +798,54 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_cttz( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP15]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[TMP1:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[TMP0]], i1 true) -; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[TMP1]], ptr [[GEP3]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; 
NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -684,19 +932,58 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_lrint( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP17]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fpext <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x double> +; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> [[TMP12]]) +; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32> +; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] 
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV2:%.*]] = fpext float [[TMP0]] to double ; NO-VP-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.lrint.i64.f64(double [[CONV2]]) ; NO-VP-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP1]] to i32 -; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[CONV3]], ptr [[GEP5]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -785,19 +1072,58 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_llrint( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP17]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fpext <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x double> +; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> [[TMP12]]) +; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32> +; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; 
NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV2:%.*]] = fpext float [[TMP0]] to double ; NO-VP-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.llrint.i64.f64(double [[CONV2]]) ; NO-VP-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP1]] to i32 -; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[CONV3]], ptr [[GEP5]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP17:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -882,17 +1208,54 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_abs( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true) +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add 
nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[COND:%.*]] = tail call i32 @llvm.abs.i32(i32 [[TMP0]], i1 true) -; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[COND]], ptr [[GEP9]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -937,17 +1300,44 @@ define void @log10(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @log10( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8 +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP1:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16 +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4 +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.log10.v4f32(<4 x float> [[WIDE_LOAD]]) +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4 +; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; 
NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[COND:%.*]] = tail call float @llvm.log10.f32(float [[TMP0]]) -; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store float [[COND]], ptr [[GEP9]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -1003,3 +1393,26 @@ declare i32 @llvm.abs.i32(i32, i1 immarg) ; IF-EVL: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]], [[META3]]} ; IF-EVL: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]]} ;. +; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]} +; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]} +; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]} +; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]} +; NO-VP: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} +; NO-VP: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]} +; NO-VP: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} +; NO-VP: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]} +; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]} +; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]} +; NO-VP: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]} +; NO-VP: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]} +; NO-VP: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} +; NO-VP: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]} +; NO-VP: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]} +; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]]} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll index 7139170..4b718dd 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cast-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP define void @vp_sext(ptr %a, ptr %b, i64 %N) { @@ -69,17 +67,54 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_sext( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3 +; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2 +; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]] +; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] +; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META0:![0-9]+]] +; NO-VP-NEXT: [[TMP10:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; 
NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV2:%.*]] = sext i32 [[TMP0]] to i64 -; NO-VP-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i64 [[CONV2]], ptr [[GEP4]], align 8 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -161,17 +196,54 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_zext( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3 +; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2 +; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]] +; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] +; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META9:![0-9]+]] +; NO-VP-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x i64> 
[[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META12:![0-9]+]], !noalias [[META9]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = zext i32 [[TMP0]] to i64 -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i64 [[CONV]], ptr [[GEP2]], align 8 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -253,17 +325,54 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_trunc( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2 +; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3 +; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]] +; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] +; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr 
inbounds i64, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8, !alias.scope [[META16:![0-9]+]] +; NO-VP-NEXT: [[TMP10:%.*]] = trunc <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x i32> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x i32> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP]], align 8 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP1]], align 8 ; NO-VP-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP22:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -345,17 +454,54 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_fpext( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3 +; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2 +; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]] +; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] +; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; NO-VP-NEXT: 
[[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[GEP]], align 4, !alias.scope [[META23:![0-9]+]] +; NO-VP-NEXT: [[TMP10:%.*]] = fpext <vscale x 2 x float> [[WIDE_LOAD]] to <vscale x 2 x double> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x double> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META26:![0-9]+]], !noalias [[META23]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store double [[CONV]], ptr [[GEP2]], align 8 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -437,17 +583,54 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_fptrunc( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2 +; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3 +; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]] +; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; NO-VP-NEXT: 
[[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] +; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[GEP]], align 8, !alias.scope [[META30:![0-9]+]] +; NO-VP-NEXT: [[TMP10:%.*]] = fptrunc <vscale x 2 x double> [[WIDE_LOAD]] to <vscale x 2 x float> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x float> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META33:![0-9]+]], !noalias [[META30]] +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load double, ptr [[GEP]], align 8 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load double, ptr [[GEP1]], align 8 ; NO-VP-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP36:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -529,17 +712,54 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_sitofp( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; 
NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = sitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float> +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP38:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -621,17 +841,54 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_uitofp( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; 
NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = uitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float> +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = uitofp i32 [[TMP0]] to float -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP40:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -713,17 +970,54 @@ 
define void @vp_fptosi(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_fptosi( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fptosi <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32> +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32 -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] 
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP42:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -805,17 +1099,54 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_fptoui( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = fptoui <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32> +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4 ; NO-VP-NEXT: [[CONV:%.*]] = fptoui float [[TMP0]] to i32 -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], 
i64 [[IV1]] ; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP44:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -897,17 +1228,54 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_inttoptr( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64 +; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64 +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 2 +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP15]]) +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; NO-VP: [[VECTOR_MEMCHECK]]: +; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]] +; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]] +; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8 +; NO-VP-NEXT: [[TMP12:%.*]] = inttoptr <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x ptr> +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store <vscale x 2 x ptr> [[TMP12]], ptr [[TMP13]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP]], align 8 +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP0:%.*]] = 
load i64, ptr [[GEP1]], align 8 ; NO-VP-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr -; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV1]] ; NO-VP-NEXT: store ptr [[TMP1]], ptr [[GEP2]], align 8 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP46:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -987,16 +1355,50 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; NO-VP-LABEL: define void @vp_ptrtoint( ; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[ENTRY:.*]]: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) +; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] +; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] +; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 +; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], <vscale x 2 x i64> [[VEC_IND]] +; NO-VP-NEXT: [[TMP10:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP9]] to <vscale x 2 x i64> +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; 
NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] ; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64 ; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP48:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -1068,3 +1470,53 @@ exit: ; IF-EVL: [[LOOP48]] = distinct !{[[LOOP48]], [[META6]], [[META7]], [[META8]]} ; IF-EVL: [[LOOP49]] = distinct !{[[LOOP49]], [[META8]], [[META6]]} ;. +; NO-VP: [[META0]] = !{[[META1:![0-9]+]]} +; NO-VP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} +; NO-VP: [[META2]] = distinct !{[[META2]], !"LVerDomain"} +; NO-VP: [[META3]] = !{[[META4:![0-9]+]]} +; NO-VP: [[META4]] = distinct !{[[META4]], [[META2]]} +; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} +; NO-VP: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} +; NO-VP: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} +; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]} +; NO-VP: [[META9]] = !{[[META10:![0-9]+]]} +; NO-VP: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]} +; NO-VP: [[META11]] = distinct !{[[META11]], !"LVerDomain"} +; NO-VP: [[META12]] = !{[[META13:![0-9]+]]} +; NO-VP: [[META13]] = distinct !{[[META13]], [[META11]]} +; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META7]]} +; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META6]]} +; NO-VP: [[META16]] = !{[[META17:![0-9]+]]} +; NO-VP: [[META17]] = distinct !{[[META17]], [[META18:![0-9]+]]} +; NO-VP: [[META18]] = distinct !{[[META18]], !"LVerDomain"} +; NO-VP: [[META19]] = !{[[META20:![0-9]+]]} +; NO-VP: [[META20]] = distinct !{[[META20]], [[META18]]} +; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META6]], [[META7]]} +; NO-VP: [[LOOP22]] = distinct !{[[LOOP22]], [[META6]]} +; NO-VP: [[META23]] = !{[[META24:![0-9]+]]} +; NO-VP: [[META24]] = distinct !{[[META24]], [[META25:![0-9]+]]} +; NO-VP: [[META25]] = distinct !{[[META25]], !"LVerDomain"} +; NO-VP: [[META26]] = !{[[META27:![0-9]+]]} +; NO-VP: [[META27]] = distinct !{[[META27]], [[META25]]} +; NO-VP: [[LOOP28]] = distinct !{[[LOOP28]], [[META6]], [[META7]]} +; NO-VP: [[LOOP29]] = distinct !{[[LOOP29]], [[META6]]} +; NO-VP: [[META30]] = !{[[META31:![0-9]+]]} +; NO-VP: [[META31]] = distinct !{[[META31]], [[META32:![0-9]+]]} +; NO-VP: [[META32]] = distinct !{[[META32]], !"LVerDomain"} +; NO-VP: [[META33]] = !{[[META34:![0-9]+]]} +; NO-VP: [[META34]] = distinct !{[[META34]], [[META32]]} +; NO-VP: [[LOOP35]] = distinct !{[[LOOP35]], [[META6]], [[META7]]} +; NO-VP: [[LOOP36]] = distinct !{[[LOOP36]], [[META6]]} +; NO-VP: [[LOOP37]] = distinct !{[[LOOP37]], [[META6]], [[META7]]} +; NO-VP: [[LOOP38]] = distinct !{[[LOOP38]], [[META6]]} +; NO-VP: [[LOOP39]] = distinct !{[[LOOP39]], [[META6]], [[META7]]} +; NO-VP: [[LOOP40]] = distinct !{[[LOOP40]], [[META6]]} +; NO-VP: [[LOOP41]] = distinct !{[[LOOP41]], [[META6]], [[META7]]} +; NO-VP: [[LOOP42]] = distinct !{[[LOOP42]], [[META6]]} +; NO-VP: [[LOOP43]] = distinct !{[[LOOP43]], [[META6]], [[META7]]} +; NO-VP: [[LOOP44]] = distinct !{[[LOOP44]], [[META6]]} +; NO-VP: [[LOOP45]] = distinct 
!{[[LOOP45]], [[META6]], [[META7]]} +; NO-VP: [[LOOP46]] = distinct !{[[LOOP46]], [[META6]]} +; NO-VP: [[LOOP47]] = distinct !{[[LOOP47]], [[META6]], [[META7]]} +; NO-VP: [[LOOP48]] = distinct !{[[LOOP48]], [[META7]], [[META6]]} +;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll index f8e8435..5f53cb6 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll @@ -1,24 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-OUTLOOP ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-INLOOP ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-OUTLOOP ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-INLOOP define i32 @cond_add(ptr %a, i64 %n, i32 %start) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll index cd53ea0..238aed1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll @@ -3,7 +3,7 @@ ; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=DATA ; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -mtriple riscv64-linux-gnu -force-tail-folding-style=data-with-evl -mattr=+v,+f -S \ +; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S \ ; RUN: -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=EVL ; DATA: Cost of 2 for VF 2: EMIT{{.*}} = active lane mask diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll index 0a66ce8..f27f49e 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-div.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: 
-prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { @@ -65,9 +63,38 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-LABEL: define void @test_sdiv( ; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0:[0-9]+]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; NO-VP-NEXT: [[TMP8:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8 ; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -77,7 +104,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: 
[[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -158,9 +185,38 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-LABEL: define void @test_udiv( ; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; NO-VP-NEXT: [[TMP8:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8 ; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -170,7 +226,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -250,9 +306,38 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-LABEL: define void @test_srem( ; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 
1024, [[TMP12]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; NO-VP-NEXT: [[TMP8:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8 ; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -262,7 +347,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -342,9 +427,38 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-LABEL: define void @test_urem( ; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] { ; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]: +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; NO-VP: [[VECTOR_PH]]: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 
[[TMP4]], 2 +; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]] +; NO-VP: [[VECTOR_BODY]]: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8 +; NO-VP-NEXT: [[TMP8:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; NO-VP: [[MIDDLE_BLOCK]]: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; NO-VP: [[SCALAR_PH]]: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ] ; NO-VP-NEXT: br label %[[LOOP:.*]] ; NO-VP: [[LOOP]]: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ] ; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8 ; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]] @@ -354,7 +468,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) { ; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024 -; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] ; NO-VP: [[EXIT]]: ; NO-VP-NEXT: ret void ; @@ -390,3 +504,14 @@ exit: ; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]} ; IF-EVL: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]} ;. +; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} +; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} +; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} +; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} +; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} +; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]} +; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} +; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]} +; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} +;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index 7c8f111a..6a6bca2d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -1,14 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { @@ -482,20 +480,62 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) { ; IF-EVL-LABEL: define i32 @FOR_reduction( ; IF-EVL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[TC:%.*]]) #[[ATTR0]] { ; IF-EVL-NEXT: [[ENTRY:.*]]: +; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 4 +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TC]], [[TMP1]] +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; IF-EVL: [[VECTOR_PH]]: +; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 +; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 33, i32 [[TMP8]] +; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]] +; IF-EVL: [[VECTOR_BODY]]: +; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] +; IF-EVL-NEXT: [[WIDE_LOAD]] = load <vscale x 4 x i32>, ptr [[ARRAYIDX]], align 4 +; IF-EVL-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[VECTOR_RECUR]], <vscale x 4 x i32> [[WIDE_LOAD]], i32 -1) +; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], [[WIDE_LOAD]] +; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] +; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4 +; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP5]] +; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP13]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL: [[MIDDLE_BLOCK]]: +; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4 +; IF-EVL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1 +; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i32> [[WIDE_LOAD]], i32 [[TMP16]] +; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4 +; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 2 +; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <vscale x 4 x i32> [[WIDE_LOAD]], i32 [[TMP19]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; IF-EVL: [[SCALAR_PH]]: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 33, %[[ENTRY]] ] ; IF-EVL-NEXT: br label %[[FOR_BODY:.*]] ; IF-EVL: [[FOR_BODY]]: -; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]] -; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX]], align 4 +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX1]], align 4 ; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP0]] -; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]] +; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV]] ; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 -; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1 +; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1 ; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]] -; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; IF-EVL: [[FOR_END]]: -; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ] +; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]] ; ; NO-VP-LABEL: define i32 @FOR_reduction( @@ -733,8 +773,8 @@ for.end: ; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]} ; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]} ; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]} -; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META10:![0-9]+]]} -; IF-EVL: [[META10]] = !{!"llvm.loop.vectorize.enable", i1 true} +; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META3]]} +; IF-EVL: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]} ; IF-EVL: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]], [[META3]]} ; IF-EVL: [[LOOP12]] = distinct !{[[LOOP12]], [[META3]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll index 2b7a9fb..892293b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) { @@ -69,18 +67,54 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde ; ; NO-VP-LABEL: @gather_scatter( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP14]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64() +; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1) +; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]] +; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]] +; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0 +; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], <vscale x 2 x i64> [[VEC_IND]] +; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP9]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison) +; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]] +; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[TMP10]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]] 
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true)) +; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP5]] +; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] ; NO-VP: for.body: -; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]] +; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV1]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 -; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]] ; NO-VP-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 -; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]] ; NO-VP-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4 -; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; NO-VP-NEXT: [[INDVARS_IV_NEXT1]] = add nuw nsw i64 [[INDVARS_IV1]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: for.end: ; NO-VP-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll index a7955a5..e0594ad 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll @@ -1,14 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP 
define i32 @add(ptr %a, i64 %n, i32 %start) { @@ -124,35 +122,34 @@ for.end: define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-LABEL: @mul( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 8 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 3 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP2]], i32 4, <4 x i1> [[TMP1]], <4 x i32> poison) -; IF-EVL-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> splat (i32 1) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]]) ; IF-EVL-NEXT: [[MUL]] = mul i32 [[TMP5]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[TMP6]] = mul i32 [[TMP8]], [[VEC_PHI1]] +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP6]], [[MUL]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] 
; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -161,10 +158,10 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL1]] = mul nsw i32 [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[MUL]], [[MIDDLE_BLOCK]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret i32 [[MUL_LCSSA]] ; ; NO-VP-LABEL: @mul( @@ -1116,35 +1113,34 @@ for.end: define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmul( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 8 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 3 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP2]], i32 4, <4 x i1> [[TMP1]], <4 x float> poison) -; IF-EVL-NEXT: [[TMP4:%.*]] = select reassoc <4 x i1> [[TMP1]], <4 x float> [[WIDE_MASKED_LOAD]], <4 x float> splat (float 1.000000e+00) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4 +; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP5:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]]) ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP5]], [[RDX]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float 
@llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[TMP6]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]] +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP6]], [[MUL]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1153,10 +1149,10 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL1]] = fmul reassoc float [[TMP0]], [[RDX1]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: for.end: -; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[MUL]], [[MIDDLE_BLOCK]] ] +; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL1]], [[FOR_BODY1]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ] ; IF-EVL-NEXT: ret float [[MUL_LCSSA]] ; ; NO-VP-LABEL: @fmul( @@ -1451,37 +1447,35 @@ for.end: define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fminimum( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT2]], <8 x float> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = 
insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) -; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1490,7 +1484,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] @@ -1561,37 +1555,35 @@ for.end: define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmaximum( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br 
i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT2]], <8 x float> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) -; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi 
float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1600,7 +1592,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index b2ebe6f..80b0714 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s ; FIXME: interleaved accesses are not supported yet with predicated vectorization. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll index 314c201..c7808b1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-intermediate-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll @@ -1,24 +1,20 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-OUTLOOP ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-INLOOP ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-OUTLOOP ; RUN: opt -passes=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-INLOOP define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll index 2c265c9..bda9145 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll index 2c77d78..2601bfd 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-known-no-overflow.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll @@ -1,7 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ 
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; TODO: We know the IV will never overflow here so we can skip the overflow diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll index 1efd539..226372f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { @@ -66,23 +64,52 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { ; ; NO-VP-LABEL: @masked_loadstore( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP12]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; NO-VP-NEXT: [[INC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4 +; NO-VP-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]] +; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison) +; NO-VP-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[WIDE_MASKED_LOAD]] +; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]]) +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[INC]] +; NO-VP-NEXT: br i1 [[TMP10]], label [[FOR_INC:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[INC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[I_011:%.*]] = phi i64 [ [[INC]], 
[[FOR_INC]] ], [ 0, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] ; NO-VP: for.body: -; NO-VP-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_011]] +; NO-VP-NEXT: [[I_11:%.*]] = phi i64 [ [[INC1:%.*]], [[FOR_INC1:%.*]] ], [ [[I_011]], [[SCALAR_PH]] ] +; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_11]] ; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; NO-VP-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP0]], 0 -; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] +; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC1]] ; NO-VP: if.then: -; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_011]] +; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_11]] ; NO-VP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 ; NO-VP-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]] ; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4 -; NO-VP-NEXT: br label [[FOR_INC]] +; NO-VP-NEXT: br label [[FOR_INC1]] ; NO-VP: for.inc: -; NO-VP-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1 -; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N:%.*]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]] +; NO-VP-NEXT: [[INC1]] = add nuw nsw i64 [[I_11]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC1]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-no-masking.ll index 1c49fba..6285a6f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-no-masking.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; No need to emit predicated vector code if the vector instructions with masking are not required. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll index 81e0f75..41bddaa 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-ordered-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll @@ -1,14 +1,12 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { @@ -60,18 +58,45 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) { ; ; NO-VP-LABEL: @fadd( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N1:%.*]], [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N1]], [[TMP3]] +; NO-VP-NEXT: [[N:%.*]] = sub i64 [[N1]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] -; NO-VP: for.body: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP: vector.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; NO-VP-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; NO-VP-NEXT: [[ADD]] = fadd float [[TMP0]], [[SUM_07]] -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[ARRAYIDX]], align 4 +; NO-VP-NEXT: [[ADD]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[SUM_07]], <vscale x 4 x float> [[WIDE_LOAD]]) +; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N1]], [[N]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], 
[[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[ADD]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY1]] ] +; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] +; NO-VP-NEXT: [[SUM_7:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY1]] ] +; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]] +; NO-VP-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; NO-VP-NEXT: [[ADD1]] = fadd float [[TMP9]], [[SUM_7]] +; NO-VP-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT1:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N1]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT1]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: for.end: -; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY1]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ] ; NO-VP-NEXT: ret float [[ADD_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction-cost.ll index 15f9993..10d83f4 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction-cost.ll @@ -1,7 +1,6 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize --disable-output \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck %s ; CHECK: Cost of 2 for VF vscale x 4: WIDEN-INTRINSIC vp<%{{.+}}> = call llvm.vp.merge(ir<true>, ir<%add>, ir<%rdx>, vp<%{{.+}}>) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll index 73fd389..3a963b0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP define i32 @add(ptr %a, i64 %n, i32 %start) { @@ -125,36 +123,34 @@ for.end: define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-LABEL: @mul( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <8 x i32> splat (i32 1), i32 [[START:%.*]], i32 0 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison) +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x i32>, ptr [[TMP3]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] -; IF-EVL-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP2]], <8 x i32> [[TMP5]], <8 x i32> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP4]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]] +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP6:%.*]] = mul <8 x i32> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP6]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -163,7 +159,7 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL]] = mul nsw i32 [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: 
[[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP6:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] @@ -1158,36 +1154,34 @@ for.end: define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmul( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 ; IF-EVL-NEXT: [[TMP9:%.*]] = insertelement <8 x float> splat (float 1.000000e+00), float [[START:%.*]], i32 0 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[TMP9]], [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ splat (float 1.000000e+00), [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP2]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP3]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 ; IF-EVL-NEXT: [[TMP5]] = fmul reassoc <8 x float> [[WIDE_MASKED_LOAD]], [[VEC_PHI]] -; IF-EVL-NEXT: [[TMP6:%.*]] = select reassoc <8 x i1> [[TMP2]], <8 x float> [[TMP5]], <8 x float> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP4]] = fmul reassoc <8 x float> [[WIDE_LOAD2]], [[VEC_PHI1]] +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP6:%.*]] = fmul reassoc <8 x float> [[TMP4]], [[TMP5]] ; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v8f32(float 1.000000e+00, <8 x float> [[TMP6]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: 
[[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP8]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1196,7 +1190,7 @@ define float @fmul(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP0]], [[RDX]] ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP24:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MUL_LCSSA:%.*]] = phi float [ [[MUL]], [[FOR_BODY1]] ], [ [[TMP8]], [[MIDDLE_BLOCK]] ] @@ -1504,37 +1498,35 @@ for.end: define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fminimum( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <8 x float> poison, float [[START:%.*]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT2]], <8 x float> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP2]], i32 4, <8 x 
i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) -; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1543,7 +1535,7 @@ define float @fminimum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MIN]] = tail call float @llvm.minimum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP30:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MIN_LCSSA:%.*]] = phi float [ [[MIN]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] @@ -1614,37 +1606,35 @@ for.end: define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-LABEL: @fmaximum( ; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N_RND_UP:%.*]], 16 +; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] ; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 7 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT3]], <8 x i64> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <8 
x float> poison, float [[START:%.*]], i64 0 ; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x float> [[BROADCAST_SPLATINSERT2]], <8 x float> poison, <8 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> poison, i64 [[IV]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> poison, <8 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7> -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <8 x i64> [[VEC_IV]], [[BROADCAST_SPLAT4]] +; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]] -; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x float> poison) +; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4 +; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4 ; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]]) -; IF-EVL-NEXT: [[TMP5:%.*]] = select <8 x i1> [[TMP1]], <8 x float> [[TMP4]], <8 x float> [[VEC_PHI]] -; IF-EVL-NEXT: [[IV_NEXT]] = add i64 [[IV]], 8 +; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]]) +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16 ; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]]) ; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]]) -; IF-EVL-NEXT: br label [[FOR_END:%.*]] +; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ] -; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY1]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY1]] ] ; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]] ; IF-EVL: for.body: ; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ] @@ -1653,7 +1643,7 @@ define float @fmaximum(ptr %a, i64 %n, float %start) { ; IF-EVL-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; IF-EVL-NEXT: [[MAX]] = tail call float @llvm.maximum.f32(float [[RDX]], float [[TMP0]]) ; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1 -; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N]] +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N_RND_UP]] ; IF-EVL-NEXT: br i1 
[[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP32:![0-9]+]] ; IF-EVL: for.end: ; IF-EVL-NEXT: [[MAX_LCSSA:%.*]] = phi float [ [[MAX]], [[FOR_BODY1]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll index 01033c8..62eef6c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) { @@ -75,18 +73,61 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt ; ; NO-VP-LABEL: @reverse_load_store( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]] +; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]] +; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP8]] +; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]] +; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1 +; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]] +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]] +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4 +; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]]) +; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP8]] +; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]] +; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1 +; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]] +; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]] +; NO-VP-NEXT: [[TMP20:%.*]] 
= getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]] +; NO-VP-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE]]) +; NO-VP-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL]], [[ENTRY:%.*]] ] +; NO-VP-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] ; NO-VP: for.body: -; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; NO-VP-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ] +; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY1]] ] ; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[ADD]] +; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] ; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]] +; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] ; NO-VP-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 ; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1 ; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]] +; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: loopend: ; NO-VP-NEXT: ret void ; @@ -188,25 +229,74 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal ; ; NO-VP-LABEL: @reverse_load_store_masked( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[ENTRY:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4 +; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL1:%.*]], [[N_VEC]] +; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL1]], [[INDEX]] +; NO-VP-NEXT: [[OFFSET_IDX1:%.*]] = trunc i64 [[INDEX]] to i32 +; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[OFFSET_IDX1]] +; 
NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4 +; NO-VP-NEXT: [[TMP10:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 100) +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP8]] +; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP5]] +; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP5]], 1 +; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 -1, [[TMP13]] +; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP11]], i64 [[TMP12]] +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP14]] +; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]]) +; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison) +; NO-VP-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]]) +; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP8]] +; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP5]] +; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP5]], 1 +; NO-VP-NEXT: [[TMP20:%.*]] = mul i64 -1, [[TMP19]] +; NO-VP-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i64 [[TMP18]] +; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP20]] +; NO-VP-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]]) +; NO-VP-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE2]]) +; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE4]], ptr [[TMP22]], i32 4, <vscale x 4 x i1> [[REVERSE3]]) +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[ENTRY]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[STARTVAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL1]], [[ENTRY1:%.*]] ] +; NO-VP-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ] +; NO-VP-NEXT: br label [[FOR_BODY1:%.*]] ; NO-VP: for.body: -; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] -; NO-VP-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_INC]] ] +; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ] +; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL5]], [[ENTRY]] ], [ [[INC:%.*]], [[FOR_INC]] ] ; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[I]] +; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]] ; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 ; NO-VP-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100 ; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] ; NO-VP: if.then: -; NO-VP-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1:%.*]], i64 [[ADD]] +; NO-VP-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]] ; NO-VP-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4 -; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], 
i64 [[ADD]] +; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] ; NO-VP-NEXT: store i32 [[V]], ptr [[GEPS]], align 4 ; NO-VP-NEXT: br label [[FOR_INC]] ; NO-VP: for.inc: ; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1 ; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]] +; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: loopend: ; NO-VP-NEXT: ret void ; @@ -318,20 +408,49 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr ; ; NO-VP-LABEL: @multiple_reverse_vector_pointer( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ] +; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[INDEX]] +; NO-VP-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]] +; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0 +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 -15 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 +; NO-VP-NEXT: [[REVERSE:%.*]] = shufflevector <16 x i8> [[WIDE_LOAD]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B:%.*]], <16 x i8> [[REVERSE]] +; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> [[TMP3]], i32 1, <16 x i1> splat (i1 true), <16 x i8> poison) +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]] +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0 +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 -15 +; NO-VP-NEXT: [[REVERSE1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_GATHER]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0> +; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP6]], align 1 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]] +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 0 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i32 -15 +; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP9]], align 1 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: br label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1024, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br label [[LOOP1:%.*]] ; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] -; NO-VP-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[IV]] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP1]] ] +; NO-VP-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1 -; NO-VP-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B:%.*]], i8 [[X]] +; NO-VP-NEXT: [[GEP_B:%.*]] = getelementptr 
i8, ptr [[B]], i8 [[X]] ; NO-VP-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1 -; NO-VP-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[IV]] +; NO-VP-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]] ; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1 -; NO-VP-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[IV]] +; NO-VP-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]] ; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1 ; NO-VP-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0 -; NO-VP-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP]] +; NO-VP-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP1]], !llvm.loop [[LOOP7:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll index 2dd017c..a891eef 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-safe-dep-distance.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP ; Dependence distance between read and write is greater than the trip @@ -61,17 +59,44 @@ define void @test(ptr %p) { ; ; NO-VP-LABEL: @test( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; NO-VP: vector.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[A1]], align 8 +; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 200 +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 8 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], 
!llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; NO-VP-NEXT: br label [[LOOP1:%.*]] +; NO-VP: loop: +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ] +; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]] +; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 8 +; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 200 ; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] ; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 8 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 +; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 199 +; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -143,7 +168,7 @@ define void @test_may_clobber1(ptr %p) { ; NO-VP-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32 ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 -; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: middle.block: ; NO-VP-NEXT: br label [[EXIT:%.*]] ; NO-VP: scalar.ph: @@ -158,7 +183,7 @@ define void @test_may_clobber1(ptr %p) { ; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -279,7 +304,7 @@ define void @test_may_clobber3(ptr %p) { ; NO-VP-NEXT: store <2 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32 ; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200 -; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; NO-VP: middle.block: ; NO-VP-NEXT: br label [[EXIT:%.*]] ; NO-VP: scalar.ph: @@ -294,7 +319,7 @@ define void @test_may_clobber3(ptr %p) { ; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 ; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 ; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]] +; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -366,17 +391,44 @@ define void @trivial_due_max_vscale(ptr %p) { ; ; NO-VP-LABEL: @trivial_due_max_vscale( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; NO-VP: vector.ph: +; 
NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; NO-VP: vector.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[A1]], align 32 +; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 8192 +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] +; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 32 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; NO-VP-NEXT: br label [[LOOP1:%.*]] +; NO-VP: loop: +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ] +; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]] +; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 32 +; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 8192 ; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] ; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 +; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 199 +; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; @@ -448,17 +500,34 @@ define void @no_high_lmul_or_interleave(ptr %p) { ; ; NO-VP-LABEL: @no_high_lmul_or_interleave( ; NO-VP-NEXT: entry: +; NO-VP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; NO-VP: vector.ph: ; NO-VP-NEXT: br label [[LOOP:%.*]] -; NO-VP: loop: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; NO-VP: vector.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]] -; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32 -; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[A1]], align 32 +; NO-VP-NEXT: [[TMP1:%.*]] = add i64 [[IV]], 1024 +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; NO-VP-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 32 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 4 +; NO-VP-NEXT: [[TMP3:%.*]] = icmp eq i64 [[IV_NEXT]], 3000 +; NO-VP-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop 
[[LOOP10:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: br label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ] +; NO-VP-NEXT: br label [[LOOP1:%.*]] +; NO-VP: loop: +; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ] +; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]] +; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 32 +; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 1024 ; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] ; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32 -; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 3001 -; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]] +; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1 +; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 3001 +; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]] ; NO-VP: exit: ; NO-VP-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll index 984b64c..8ab3817 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-uniform-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 -; RUN: opt < %s --prefer-predicate-over-epilogue=predicate-dont-vectorize --passes=loop-vectorize -mcpu=sifive-p470 -mattr=+v,+f -force-tail-folding-style=data-with-evl -S | FileCheck %s +; RUN: opt < %s --prefer-predicate-over-epilogue=predicate-else-scalar-epilogue --passes=loop-vectorize -mcpu=sifive-p470 -mattr=+v,+f -S | FileCheck %s ; Generated from issue #109468. ; In this test case, the vector store with tail mask will transfer to the vp intrinsic with EVL. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index 1e26d18..4da31a0 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s ; Make sure we don't crash when transforming a VPWidenCastRecipe created without ; an underlying value to an EVL recipe. 
This occurs in this test via @@ -25,13 +25,7 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[AVL:%.*]] = sub i64 9, [[EVL_BASED_IV]] ; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[EVL_BASED_IV]] -; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]]) -; CHECK-NEXT: [[TMP8:%.*]] = zext <vscale x 8 x i8> [[VP_OP_LOAD]] to <vscale x 8 x i16> -; CHECK-NEXT: [[TMP12:%.*]] = mul <vscale x 8 x i16> zeroinitializer, [[TMP8]] -; CHECK-NEXT: [[TMP13:%.*]] = lshr <vscale x 8 x i16> [[TMP12]], splat (i16 1) -; CHECK-NEXT: [[TMP14:%.*]] = trunc <vscale x 8 x i16> [[TMP13]] to <vscale x 8 x i8> -; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> [[TMP14]], <vscale x 8 x ptr> align 1 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]]) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> zeroinitializer, <vscale x 8 x ptr> align 1 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll index 0e1ec57..bb2e099 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 -; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s ; This test tries to recreate the conditions for a crash that occurred when the ; VPTypeAnalysis cache wasn't cleared after a recipe was erased and clobbered @@ -39,15 +39,13 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count) ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[TMP15:%.*]] = zext <vscale x 8 x i8> [[VP_OP_LOAD]] to <vscale x 8 x i32> -; CHECK-NEXT: [[VP_OP:%.*]] = mul <vscale x 8 x i32> [[TMP15]], zeroinitializer ; CHECK-NEXT: [[TMP23:%.*]] = ashr <vscale x 8 x i32> [[TMP15]], zeroinitializer ; CHECK-NEXT: [[VP_OP3:%.*]] = or <vscale x 8 x i32> [[TMP23]], zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = icmp ult <vscale x 8 x i32> [[TMP15]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = select <vscale x 8 x i1> [[TMP16]], <vscale x 8 x i32> [[VP_OP3]], <vscale x 8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP24:%.*]] = trunc <vscale x 8 x i32> [[TMP17]] to <vscale x 8 x i8> ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> [[TMP24]], <vscale x 
8 x ptr> align 1 [[BROADCAST_SPLAT]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] -; CHECK-NEXT: [[TMP19:%.*]] = trunc <vscale x 8 x i32> [[VP_OP]] to <vscale x 8 x i16> -; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> [[TMP19]], <vscale x 8 x ptr> align 2 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]) +; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 9062542..d3c2ccf 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 ; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=SCALABLE ; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=off -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=FIXEDLEN -; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-SCALABLE +; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-SCALABLE target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" target triple = "riscv64" @@ -226,17 +226,44 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-LABEL: define i64 @uniform_load_outside_use( ; TF-SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { ; TF-SCALABLE-NEXT: [[ENTRY:.*]]: -; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] -; TF-SCALABLE: [[FOR_BODY]]: -; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2 +; TF-SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] +; TF-SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; TF-SCALABLE: [[VECTOR_PH]]: +; TF-SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 +; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] +; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] +; TF-SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; TF-SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2 +; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]] +; TF-SCALABLE: [[VECTOR_BODY]]: +; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; TF-SCALABLE-NEXT: [[V:%.*]] = load 
i64, ptr [[B]], align 8 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0 +; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer ; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]] -; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 -; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; TF-SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8 +; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP5]] +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE: [[MIDDLE_BLOCK]]: +; TF-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]] +; TF-SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; TF-SCALABLE: [[SCALAR_PH]]: +; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]] +; TF-SCALABLE: [[FOR_BODY]]: +; TF-SCALABLE-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8 +; TF-SCALABLE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]] +; TF-SCALABLE-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8 +; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END:.*]], label %[[FOR_BODY]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: -; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ] +; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], %[[FOR_BODY]] ], [ [[V]], %[[MIDDLE_BLOCK]] ] ; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]] ; entry: @@ -404,7 +431,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -423,7 +450,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -555,7 +582,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias 
nocapt ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -568,7 +595,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -693,7 +720,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -706,7 +733,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -858,7 +885,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -871,7 +898,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], 
label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -1037,7 +1064,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -1055,7 +1082,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; @@ -1186,7 +1213,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]] ; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]] ; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; TF-SCALABLE: [[MIDDLE_BLOCK]]: ; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]] ; TF-SCALABLE: [[SCALAR_PH]]: @@ -1199,7 +1226,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap ; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8 ; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025 -; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; TF-SCALABLE: [[FOR_END]]: ; TF-SCALABLE-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index 74eb094..7537a43 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -1,12 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s ; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: 
-prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll index 9652351..7f1066c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll @@ -1,8 +1,7 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll index 3ec48ef..c1b656a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll @@ -1,7 +1,6 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s define void @vp_sext(ptr %a, ptr %b, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll index 7f29213..9900602 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll @@ -1,8 +1,7 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll index baf546b..1c9554d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll @@ -1,25 +1,21 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL-OUTLOOP,IF-EVL %s ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ ; 
RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL-INLOOP,IF-EVL %s ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP-OUTLOOP %s ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ ; RUN: -prefer-inloop-reductions \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP-INLOOP %s diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll index 97a6130..42a846a 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll @@ -1,13 +1,11 @@ ; REQUIRES: asserts ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL,CHECK %s ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP,CHECK %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll index 1ad75bb..3d44317 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination-early-exit.ll @@ -281,3 +281,147 @@ exit: %res = phi i64 [ %iv, %loop.header ], [ 1, %loop.latch ] ret i64 %res } + +define i8 @test_early_exit_max_vector_tc_eq_16(ptr dereferenceable(17) %A) nosync nofree { +; VF8UF1-LABEL: define i8 @test_early_exit_max_vector_tc_eq_16( +; VF8UF1-SAME: ptr dereferenceable(17) [[A:%.*]]) #[[ATTR0]] { +; VF8UF1-NEXT: [[ENTRY:.*]]: +; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF8UF1: [[VECTOR_PH]]: +; VF8UF1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF8UF1: [[VECTOR_BODY]]: +; VF8UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF8UF1-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; VF8UF1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 +; VF8UF1-NEXT: [[TMP1:%.*]] = icmp eq <8 x i8> 
[[WIDE_LOAD]], zeroinitializer +; VF8UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; VF8UF1-NEXT: [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP1]]) +; VF8UF1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF8UF1-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]] +; VF8UF1-NEXT: br i1 [[TMP4]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF8UF1: [[MIDDLE_SPLIT]]: +; VF8UF1-NEXT: br i1 [[TMP2]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; VF8UF1: [[MIDDLE_BLOCK]]: +; VF8UF1-NEXT: br label %[[SCALAR_PH]] +; VF8UF1: [[VECTOR_EARLY_EXIT]]: +; VF8UF1-NEXT: br label %[[EXIT:.*]] +; VF8UF1: [[SCALAR_PH]]: +; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF8UF1-NEXT: br label %[[LOOP_HEADER:.*]] +; VF8UF1: [[LOOP_HEADER]]: +; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; VF8UF1-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] +; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF8UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 +; VF8UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] +; VF8UF1: [[LOOP_LATCH]]: +; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP7:![0-9]+]] +; VF8UF1: [[EXIT]]: +; VF8UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF1-NEXT: ret i8 [[RES]] +; +; VF8UF2-LABEL: define i8 @test_early_exit_max_vector_tc_eq_16( +; VF8UF2-SAME: ptr dereferenceable(17) [[A:%.*]]) #[[ATTR0]] { +; VF8UF2-NEXT: [[ENTRY:.*]]: +; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF8UF2: [[VECTOR_PH]]: +; VF8UF2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF8UF2: [[VECTOR_BODY]]: +; VF8UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF8UF2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; VF8UF2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i32 8 +; VF8UF2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1 +; VF8UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 +; VF8UF2-NEXT: [[TMP2:%.*]] = icmp eq <8 x i8> [[WIDE_LOAD]], zeroinitializer +; VF8UF2-NEXT: [[TMP3:%.*]] = icmp eq <8 x i8> [[WIDE_LOAD1]], zeroinitializer +; VF8UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; VF8UF2-NEXT: [[TMP4:%.*]] = or <8 x i1> [[TMP2]], [[TMP3]] +; VF8UF2-NEXT: [[TMP5:%.*]] = call i1 @llvm.vector.reduce.or.v8i1(<8 x i1> [[TMP4]]) +; VF8UF2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF8UF2-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]] +; VF8UF2-NEXT: br i1 [[TMP7]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF8UF2: [[MIDDLE_SPLIT]]: +; VF8UF2-NEXT: br i1 [[TMP5]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; VF8UF2: [[MIDDLE_BLOCK]]: +; VF8UF2-NEXT: br label %[[SCALAR_PH]] +; VF8UF2: [[VECTOR_EARLY_EXIT]]: +; VF8UF2-NEXT: br label %[[EXIT:.*]] +; VF8UF2: [[SCALAR_PH]]: +; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF8UF2-NEXT: br label %[[LOOP_HEADER:.*]] +; VF8UF2: [[LOOP_HEADER]]: +; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] 
] +; VF8UF2-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] +; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF8UF2-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 +; VF8UF2-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] +; VF8UF2: [[LOOP_LATCH]]: +; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] +; VF8UF2: [[EXIT]]: +; VF8UF2-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF8UF2-NEXT: ret i8 [[RES]] +; +; VF16UF1-LABEL: define i8 @test_early_exit_max_vector_tc_eq_16( +; VF16UF1-SAME: ptr dereferenceable(17) [[A:%.*]]) #[[ATTR0]] { +; VF16UF1-NEXT: [[ENTRY:.*]]: +; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF16UF1: [[VECTOR_PH]]: +; VF16UF1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF16UF1: [[VECTOR_BODY]]: +; VF16UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF16UF1-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] +; VF16UF1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 +; VF16UF1-NEXT: [[TMP1:%.*]] = icmp eq <16 x i8> [[WIDE_LOAD]], zeroinitializer +; VF16UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; VF16UF1-NEXT: [[TMP2:%.*]] = call i1 @llvm.vector.reduce.or.v16i1(<16 x i1> [[TMP1]]) +; VF16UF1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF16UF1-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]] +; VF16UF1-NEXT: br i1 [[TMP4]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; VF16UF1: [[MIDDLE_SPLIT]]: +; VF16UF1-NEXT: br i1 [[TMP2]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; VF16UF1: [[MIDDLE_BLOCK]]: +; VF16UF1-NEXT: br label %[[SCALAR_PH]] +; VF16UF1: [[VECTOR_EARLY_EXIT]]: +; VF16UF1-NEXT: br label %[[EXIT:.*]] +; VF16UF1: [[SCALAR_PH]]: +; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF16UF1-NEXT: br label %[[LOOP_HEADER:.*]] +; VF16UF1: [[LOOP_HEADER]]: +; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; VF16UF1-NEXT: [[P_SRC:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]] +; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF16UF1-NEXT: [[C:%.*]] = icmp eq i8 [[L]], 0 +; VF16UF1-NEXT: br i1 [[C]], label %[[EXIT]], label %[[LOOP_LATCH]] +; VF16UF1: [[LOOP_LATCH]]: +; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] +; VF16UF1: [[EXIT]]: +; VF16UF1-NEXT: [[RES:%.*]] = phi i8 [ 0, %[[LOOP_HEADER]] ], [ 1, %[[LOOP_LATCH]] ], [ 0, %[[VECTOR_EARLY_EXIT]] ] +; VF16UF1-NEXT: ret i8 [[RES]] +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %p.src = getelementptr inbounds i8, ptr %A, i64 %iv + %l = load i8, ptr %p.src, align 1 + %c = icmp eq i8 %l, 0 + br i1 %c, label %exit, label %loop.latch + +loop.latch: + %iv.next = add nsw i64 %iv, 1 + %cmp = icmp eq i64 %iv.next, 17 + br i1 %cmp, label %exit, label %loop.header + +exit: + %res = phi i8 [ 0, %loop.header ], [ 1, %loop.latch ] + ret i8 %res +} + + diff --git 
a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll index b396e29..59c76ae 100644 --- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll +++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll @@ -1218,6 +1218,133 @@ exit: ret void } +define void @test_vector_tc_eq_16(ptr %A) { +; VF8UF1-LABEL: define void @test_vector_tc_eq_16( +; VF8UF1-SAME: ptr [[A:%.*]]) { +; VF8UF1-NEXT: [[ENTRY:.*]]: +; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF8UF1: [[VECTOR_PH]]: +; VF8UF1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 16 +; VF8UF1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF8UF1: [[VECTOR_BODY]]: +; VF8UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF8UF1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] +; VF8UF1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[NEXT_GEP]], align 1 +; VF8UF1-NEXT: [[TMP1:%.*]] = add nsw <8 x i8> [[WIDE_LOAD]], splat (i8 10) +; VF8UF1-NEXT: store <8 x i8> [[TMP1]], ptr [[NEXT_GEP]], align 1 +; VF8UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; VF8UF1-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF8UF1-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; VF8UF1: [[MIDDLE_BLOCK]]: +; VF8UF1-NEXT: br label %[[SCALAR_PH]] +; VF8UF1: [[SCALAR_PH]]: +; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF8UF1-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ] +; VF8UF1-NEXT: br label %[[LOOP:.*]] +; VF8UF1: [[LOOP]]: +; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF1-NEXT: [[P_SRC:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[P_SRC_NEXT:%.*]], %[[LOOP]] ] +; VF8UF1-NEXT: [[P_SRC_NEXT]] = getelementptr inbounds i8, ptr [[P_SRC]], i64 1 +; VF8UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF8UF1-NEXT: [[ADD:%.*]] = add nsw i8 [[L]], 10 +; VF8UF1-NEXT: store i8 [[ADD]], ptr [[P_SRC]], align 1 +; VF8UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF8UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF8UF1-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; VF8UF1: [[EXIT]]: +; VF8UF1-NEXT: ret void +; +; VF8UF2-LABEL: define void @test_vector_tc_eq_16( +; VF8UF2-SAME: ptr [[A:%.*]]) { +; VF8UF2-NEXT: [[ENTRY:.*]]: +; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF8UF2: [[VECTOR_PH]]: +; VF8UF2-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 16 +; VF8UF2-NEXT: br label %[[VECTOR_BODY:.*]] +; VF8UF2: [[VECTOR_BODY]]: +; VF8UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF8UF2-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] +; VF8UF2-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 8 +; VF8UF2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[NEXT_GEP]], align 1 +; VF8UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1 +; VF8UF2-NEXT: [[TMP2:%.*]] = add nsw <8 x i8> [[WIDE_LOAD]], splat (i8 10) +; VF8UF2-NEXT: [[TMP3:%.*]] = add nsw <8 x i8> [[WIDE_LOAD1]], splat (i8 10) +; VF8UF2-NEXT: store <8 x i8> [[TMP2]], ptr [[NEXT_GEP]], align 1 +; VF8UF2-NEXT: store <8 x i8> [[TMP3]], ptr [[TMP1]], align 1 +; VF8UF2-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; VF8UF2-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF8UF2-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF8UF2: [[MIDDLE_BLOCK]]: +; VF8UF2-NEXT: br label %[[SCALAR_PH]] +; VF8UF2: [[SCALAR_PH]]: +; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF8UF2-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ] +; VF8UF2-NEXT: br label %[[LOOP:.*]] +; VF8UF2: [[LOOP]]: +; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF8UF2-NEXT: [[P_SRC:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[P_SRC_NEXT:%.*]], %[[LOOP]] ] +; VF8UF2-NEXT: [[P_SRC_NEXT]] = getelementptr inbounds i8, ptr [[P_SRC]], i64 1 +; VF8UF2-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF8UF2-NEXT: [[ADD:%.*]] = add nsw i8 [[L]], 10 +; VF8UF2-NEXT: store i8 [[ADD]], ptr [[P_SRC]], align 1 +; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; VF8UF2: [[EXIT]]: +; VF8UF2-NEXT: ret void +; +; VF16UF1-LABEL: define void @test_vector_tc_eq_16( +; VF16UF1-SAME: ptr [[A:%.*]]) { +; VF16UF1-NEXT: [[ENTRY:.*]]: +; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; VF16UF1: [[VECTOR_PH]]: +; VF16UF1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 16 +; VF16UF1-NEXT: br label %[[VECTOR_BODY:.*]] +; VF16UF1: [[VECTOR_BODY]]: +; VF16UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; VF16UF1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] +; VF16UF1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; VF16UF1-NEXT: [[TMP1:%.*]] = add nsw <16 x i8> [[WIDE_LOAD]], splat (i8 10) +; VF16UF1-NEXT: store <16 x i8> [[TMP1]], ptr [[NEXT_GEP]], align 1 +; VF16UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; VF16UF1-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; VF16UF1-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF16UF1: [[MIDDLE_BLOCK]]: +; VF16UF1-NEXT: br label %[[SCALAR_PH]] +; VF16UF1: [[SCALAR_PH]]: +; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; VF16UF1-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ] +; VF16UF1-NEXT: br label %[[LOOP:.*]] +; VF16UF1: [[LOOP]]: +; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF16UF1-NEXT: [[P_SRC:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[P_SRC_NEXT:%.*]], %[[LOOP]] ] +; VF16UF1-NEXT: [[P_SRC_NEXT]] = getelementptr inbounds i8, ptr [[P_SRC]], i64 1 +; VF16UF1-NEXT: [[L:%.*]] = load i8, ptr [[P_SRC]], align 1 +; VF16UF1-NEXT: [[ADD:%.*]] = add nsw i8 [[L]], 10 +; VF16UF1-NEXT: store i8 [[ADD]], ptr [[P_SRC]], align 1 +; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17 +; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]] +; VF16UF1: [[EXIT]]: +; VF16UF1-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %p.src = phi ptr [ %A, %entry ], [ %p.src.next, %loop ] + 
%p.src.next = getelementptr inbounds i8, ptr %p.src, i64 1 + %l = load i8, ptr %p.src, align 1 + %add = add nsw i8 %l, 10 + store i8 %add, ptr %p.src + %iv.next = add nsw i64 %iv, 1 + %cmp = icmp eq i64 %iv.next, 17 + br i1 %cmp, label %exit, label %loop + +exit: + ret void +} ;. ; VF8UF1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; VF8UF1: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} @@ -1227,6 +1354,8 @@ exit: ; VF8UF1: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} ; VF8UF1: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} ; VF8UF1: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]} +; VF8UF1: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} +; VF8UF1: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]} ;. ; VF8UF2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; VF8UF2: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"} @@ -1234,6 +1363,8 @@ exit: ; VF8UF2: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} ; VF8UF2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; VF8UF2: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} +; VF8UF2: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} +; VF8UF2: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. ; VF16UF1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; VF16UF1: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"} @@ -1241,4 +1372,6 @@ exit: ; VF16UF1: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} ; VF16UF1: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} ; VF16UF1: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} +; VF16UF1: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]} +; VF16UF1: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} ;. diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll b/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll new file mode 100644 index 0000000..d0450e0 --- /dev/null +++ b/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll @@ -0,0 +1,130 @@ +;; Make sure we assign the original callsite to a function clone (which will be +;; the original function clone), even when we cannot update its caller (due to +;; missing metadata e.g. from mismatched profiles). Otherwise we will try to use +;; the original function for a different clone, leading to confusion later when +;; rewriting the calls. 
+ +;; -stats requires asserts +; REQUIRES: asserts + +; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \ +; RUN: -memprof-verify-ccg -memprof-verify-nodes -stats -debug \ +; RUN: -pass-remarks=memprof-context-disambiguation %s -S 2>&1 | \ +; RUN: FileCheck %s --implicit-check-not="Mismatch in call clone assignment" \ +; RUN: --implicit-check-not="Number of callsites assigned to call multiple non-matching clones" + + +; ModuleID = '<stdin>' +source_filename = "reduced.ll" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +; CHECK-LABEL: define void @A() +define void @A() { + ; CHECK: call void @C() + call void @C() + ret void +} + +; CHECK-LABEL: define void @B() +define void @B() { + ; CHECK: call void @C.memprof.1() + call void @C(), !callsite !1 + ret void +} + +; CHECK-LABEL: define void @C() +define void @C() { + ; CHECK: call void @F() + call void @F(), !callsite !16 + ; CHECK: call void @D() + call void @D(), !callsite !2 + ret void +} + +; CHECK-LABEL: define void @D() +define void @D() { + ; CHECK: call void @E() + call void @E(), !callsite !3 + ; CHECK: call void @G() + call void @G(), !callsite !17 + ret void +} + +; CHECK-LABEL: define void @E() +define void @E() { + ; CHECK: call ptr @_Znwm(i64 0) #[[NOTCOLD:[0-9]+]] + %1 = call ptr @_Znwm(i64 0), !memprof !4, !callsite !9 + ret void +} + +; CHECK-LABEL: define void @F() +define void @F() { + ; CHECK: call void @G() + call void @G(), !callsite !17 + ret void +} + +; CHECK-LABEL: define void @G() +define void @G() { + ; CHECK: call ptr @_Znwm(i64 0) #[[NOTCOLD]] + %2 = call ptr @_Znwm(i64 0), !memprof !10, !callsite !15 + ret void +} + +; CHECK-LABEL: define void @A1() +define void @A1() { + ; CHECK: call void @C() + call void @C(), !callsite !18 + ret void +} + +; CHECK-LABEL: define void @B1() +define void @B1() { + ; CHECK: call void @C.memprof.1() + call void @C(), !callsite !19 + ret void +} + +; CHECK-LABEL: define void @C.memprof.1() + ; CHECK: call void @F.memprof.1() + ; CHECK: call void @D.memprof.1() + +; CHECK-LABEL: define void @D.memprof.1() + ; CHECK: call void @E.memprof.1() + ; CHECK: call void @G() + +; CHECK-LABEL: define void @E.memprof.1() + ; CHECK: call ptr @_Znwm(i64 0) #[[COLD:[0-9]+]] + +; CHECK-LABEL: define void @F.memprof.1() + ; CHECK: call void @G.memprof.1() + +; CHECK-LABEL: define void @G.memprof.1() + ; CHECK: call ptr @_Znwm(i64 0) #[[COLD]] + +declare ptr @_Znwm(i64) + +; IR: attributes #[[NOTCOLD]] = { "memprof"="notcold" } +; IR: attributes #[[COLD]] = { "memprof"="cold" } + +!0 = !{i64 123} +!1 = !{i64 234} +!2 = !{i64 345} +!3 = !{i64 456} +!4 = !{!5, !7} +!5 = !{!6, !"notcold"} +!6 = !{i64 567, i64 456, i64 345, i64 123} +!7 = !{!8, !"cold"} +!8 = !{i64 567, i64 456, i64 345, i64 234} +!9 = !{i64 567} +!10 = !{!11, !13} +!11 = !{!12, !"notcold"} +!12 = !{i64 678, i64 891, i64 789, i64 912} +!13 = !{!14, !"cold"} +!14 = !{i64 678, i64 891, i64 789, i64 812} +!15 = !{i64 678} +!16 = !{i64 789} +!17 = !{i64 891} +!18 = !{i64 912} +!19 = !{i64 812} |