Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/Analysis/BasicAA/modref.ll | 10
-rw-r--r-- llvm/test/Analysis/BasicAA/phi-values-usage.ll | 4
-rw-r--r-- llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll | 4
-rw-r--r-- llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll | 52
-rw-r--r-- llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll | 3
-rw-r--r-- llvm/test/Analysis/CostModel/X86/free-intrinsics.ll | 16
-rw-r--r-- llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll | 16
-rw-r--r-- llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll | 16
-rw-r--r-- llvm/test/Analysis/DDG/basic-loopnest.ll | 5
-rw-r--r-- llvm/test/Analysis/DependenceAnalysis/Coupled.ll | 4
-rw-r--r-- llvm/test/Analysis/DependenceAnalysis/DADelin.ll | 85
-rw-r--r-- llvm/test/Analysis/KernelInfo/openmp/nvptx.ll | 18
-rw-r--r-- llvm/test/Analysis/LazyValueAnalysis/invalidation.ll | 8
-rw-r--r-- llvm/test/Analysis/MemorySSA/lifetime-simple.ll | 12
-rw-r--r-- llvm/test/Analysis/MemorySSA/phi-translation.ll | 8
-rw-r--r-- llvm/test/Analysis/MemorySSA/pr43044.ll | 2
-rw-r--r-- llvm/test/Analysis/MemorySSA/pr49859.ll | 22
-rw-r--r-- llvm/test/Analysis/MemorySSA/renamephis.ll | 2
-rw-r--r-- llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll | 4
-rw-r--r-- llvm/test/Analysis/ScalarEvolution/sdiv.ll | 8
-rw-r--r-- llvm/test/Analysis/ScalarEvolution/srem.ll | 8
-rw-r--r-- llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll | 8
-rw-r--r-- llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll | 410
-rw-r--r-- llvm/test/Analysis/StackSafetyAnalysis/local.ll | 56
-rw-r--r-- llvm/test/Assembler/auto_upgrade_intrinsics.ll | 12
-rw-r--r-- llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll | 17
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll | 70
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll | 38
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-vabs.ll | 58
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-vmul.ll | 73
-rw-r--r-- llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir | 20
-rw-r--r-- llvm/test/CodeGen/AArch64/lifetime-poison.ll | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll | 84
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir | 8
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/bf16-math.ll | 21
-rw-r--r-- llvm/test/CodeGen/AMDGPU/bf16.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll | 116
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll | 1524
-rw-r--r-- llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll | 13
-rw-r--r-- llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll | 5
-rw-r--r-- llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll | 195
-rw-r--r-- llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll | 47
-rw-r--r-- llvm/test/CodeGen/AMDGPU/wqm.mir | 277
-rw-r--r-- llvm/test/CodeGen/ARM/scmp.ll | 48
-rw-r--r-- llvm/test/CodeGen/ARM/ucmp.ll | 36
-rw-r--r-- llvm/test/CodeGen/BPF/loop-exit-cond.ll | 16
-rw-r--r-- llvm/test/CodeGen/BPF/vla.ll | 20
-rw-r--r-- llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll | 4
-rw-r--r-- llvm/test/CodeGen/NVPTX/variadics-lowering.ll | 16
-rw-r--r-- llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir | 8
-rw-r--r-- llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll | 26
-rw-r--r-- llvm/test/CodeGen/Thumb/scmp.ll | 489
-rw-r--r-- llvm/test/CodeGen/Thumb/ucmp.ll | 445
-rw-r--r-- llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll | 116
-rw-r--r-- llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll | 12
-rw-r--r-- llvm/test/CodeGen/WebAssembly/returned.ll | 4
-rw-r--r-- llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll | 12
-rw-r--r-- llvm/test/CodeGen/X86/select-optimize.ll | 6
-rw-r--r-- llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll | 7
-rw-r--r-- llvm/test/Instrumentation/AddressSanitizer/lifetime.ll | 87
-rw-r--r-- llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll | 4
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll | 16
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll | 8
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll | 16
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll | 16
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll | 12
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll | 102
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll | 8
-rw-r--r-- llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll | 78
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll | 100
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll | 8
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll | 100
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/alloca.ll | 18
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll | 20
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll | 100
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll | 12
-rw-r--r-- llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll | 8
-rw-r--r-- llvm/test/Instrumentation/TypeSanitizer/alloca.ll | 16
-rw-r--r-- llvm/test/MC/RISCV/Relocations/align-after-relax.s | 50
-rw-r--r-- llvm/test/MC/RISCV/Relocations/align-norvc.s | 23
-rw-r--r-- llvm/test/MC/RISCV/Relocations/mc-dump.s | 8
-rw-r--r-- llvm/test/MC/RISCV/align-option-relax.s | 23
-rw-r--r-- llvm/test/MC/RISCV/align.s | 45
-rw-r--r-- llvm/test/MC/RISCV/cfi-advance.s | 28
-rw-r--r-- llvm/test/MC/RISCV/nop-slide.s | 16
-rw-r--r-- llvm/test/Transforms/AddDiscriminators/call.ll | 8
-rw-r--r-- llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll | 4
-rw-r--r-- llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll | 8
-rw-r--r-- llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll | 44
-rw-r--r-- llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll | 8
-rw-r--r-- llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll | 240
-rw-r--r-- llvm/test/Transforms/Attributor/heap_to_stack.ll | 2
-rw-r--r-- llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll | 2
-rw-r--r-- llvm/test/Transforms/Attributor/liveness.ll | 16
-rw-r--r-- llvm/test/Transforms/Attributor/noalias.ll | 16
-rw-r--r-- llvm/test/Transforms/Attributor/openmp_parallel.ll | 52
-rw-r--r-- llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll | 4
-rw-r--r-- llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll | 88
-rw-r--r-- llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll | 12
-rw-r--r-- llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll | 8
-rw-r--r-- llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll | 8
-rw-r--r-- llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll | 10
-rw-r--r-- llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll | 8
-rw-r--r-- llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll | 4
-rw-r--r-- llvm/test/Transforms/CodeExtractor/live_shrink.ll | 12
-rw-r--r-- llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll | 8
-rw-r--r-- llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll | 10
-rw-r--r-- llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll | 12
-rw-r--r-- llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll | 16
-rw-r--r-- llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll | 8
-rw-r--r-- llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll | 8
-rw-r--r-- llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll | 16
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-alloca-06.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-alloca-07.ll | 6
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-alloca-08.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-alloca-09.ll | 57
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll | 8
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll | 6
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll | 8
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-byval-param.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-elide-musttail.ll | 2
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll | 16
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll | 18
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll | 18
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll | 16
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll | 18
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll | 18
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-lifetime-end.ll | 14
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll | 8
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll | 4
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-02.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-dbg.ll | 4
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll | 22
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-musttail5.ll | 8
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-musttail6.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-musttail7.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll | 16
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll | 10
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll | 12
-rw-r--r-- llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll | 4
-rw-r--r-- llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll | 8
-rw-r--r-- llvm/test/Transforms/DCE/basic.ll | 8
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll | 36
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll | 12
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/dominate.ll | 6
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/libcalls.ll | 44
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/lifetime.ll | 16
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll | 12
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll | 10
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll | 4
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll | 6
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/simple.ll | 12
-rw-r--r-- llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll | 20
-rw-r--r-- llvm/test/Transforms/EarlyCSE/memoryssa.ll | 40
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll | 16
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll | 32
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/indirect-calls.ll | 8
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/intrinsics.ll | 44
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll | 24
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/pass-indirect.ll | 8
-rw-r--r-- llvm/test/Transforms/ExpandVariadics/pass-integers.ll | 60
-rw-r--r-- llvm/test/Transforms/GVN/cond_br2.ll | 8
-rw-r--r-- llvm/test/Transforms/GVN/lifetime-simple.ll | 36
-rw-r--r-- llvm/test/Transforms/GVN/opt-remarks.ll | 4
-rw-r--r-- llvm/test/Transforms/GVN/vscale.ll | 12
-rw-r--r-- llvm/test/Transforms/GVNHoist/pr29034.ll | 4
-rw-r--r-- llvm/test/Transforms/GVNSink/lifetime.ll | 26
-rw-r--r-- llvm/test/Transforms/GlobalOpt/dead-store-status.ll | 8
-rw-r--r-- llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll | 28
-rw-r--r-- llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll | 28
-rw-r--r-- llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll | 16
-rw-r--r-- llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/alloca-addrspace.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/different-intrinsics.ll | 24
-rw-r--r-- llvm/test/Transforms/IROutliner/different-order-phi-merges.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/extraction.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll | 12
-rw-r--r-- llvm/test/Transforms/IROutliner/gvn-phi-debug.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/illegal-assumes.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/illegal-memcpy.ll | 24
-rw-r--r-- llvm/test/Transforms/IROutliner/illegal-memmove.ll | 24
-rw-r--r-- llvm/test/Transforms/IROutliner/illegal-vaarg.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll | 20
-rw-r--r-- llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/no-external-block-entries.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll | 4
-rw-r--r-- llvm/test/Transforms/IROutliner/outline-memcpy.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/outline-memmove.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-bitcasts.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-cost-model.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll | 32
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll | 32
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll | 16
-rw-r--r-- llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll | 8
-rw-r--r-- llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll | 8
-rw-r--r-- llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll | 12
-rw-r--r-- llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll | 8
-rw-r--r-- llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll | 12
-rw-r--r-- llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/ML/state-tracking-coro.ll | 8
-rw-r--r-- llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll | 18
-rw-r--r-- llvm/test/Transforms/Inline/access-attributes-prop.ll | 16
-rw-r--r-- llvm/test/Transforms/Inline/byval-align.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/byval-tail-call.ll | 16
-rw-r--r-- llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/byval.ll | 12
-rw-r--r-- llvm/test/Transforms/Inline/callbr.ll | 8
-rw-r--r-- llvm/test/Transforms/Inline/devirtualize-4.ll | 20
-rw-r--r-- llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/inline-tail.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll | 12
-rw-r--r-- llvm/test/Transforms/Inline/lifetime-no-datalayout.ll | 4
-rw-r--r-- llvm/test/Transforms/Inline/lifetime.ll | 48
-rw-r--r-- llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll | 16
-rw-r--r-- llvm/test/Transforms/Inline/noalias-calls-always.ll | 8
-rw-r--r-- llvm/test/Transforms/Inline/noalias-calls.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/assume_inevitable.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll | 12
-rw-r--r-- llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/compare-alloca.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/deadcode.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll | 40
-rw-r--r-- llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll | 20
-rw-r--r-- llvm/test/Transforms/InstCombine/lifetime.ll | 40
-rw-r--r-- llvm/test/Transforms/InstCombine/lower-dbg-declare.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/malloc-free.ll | 4
-rw-r--r-- llvm/test/Transforms/InstCombine/memcpy-from-global.ll | 10
-rw-r--r-- llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll | 56
-rw-r--r-- llvm/test/Transforms/InstCombine/trivial-dse-calls.ll | 12
-rw-r--r-- llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll | 8
-rw-r--r-- llvm/test/Transforms/InstCombine/vararg.ll | 24
-rw-r--r-- llvm/test/Transforms/LICM/dropped-tbaa.ll | 8
-rw-r--r-- llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll | 8
-rw-r--r-- llvm/test/Transforms/LICM/loopsink-pr38462.ll | 8
-rw-r--r-- llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll | 8
-rw-r--r-- llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll | 56
-rw-r--r-- llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll | 8
-rw-r--r-- llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll | 79
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll | 54
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll | 550
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll | 248
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll | 73
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll | 450
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/f16.ll | 39
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll | 180
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll | 55
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll | 1294
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll | 270
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll | 102
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll | 104
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll | 46
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll | 86
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll | 10
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll | 58
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll | 780
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll | 8
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll | 14
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll | 42
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll | 444
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll | 108
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll | 205
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll | 248
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll | 36
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll | 359
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll | 128
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll | 288
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll | 38
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll | 32
-rw-r--r-- llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll | 8
-rw-r--r-- llvm/test/Transforms/LoopVectorize/X86/pr35432.ll | 20
-rw-r--r-- llvm/test/Transforms/LoopVectorize/lifetime.ll | 20
-rw-r--r-- llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll | 36
-rw-r--r-- llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll | 54
-rw-r--r-- llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll | 2
-rw-r--r-- llvm/test/Transforms/Mem2Reg/ignore-droppable.ll | 12
-rw-r--r-- llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll | 12
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll | 10
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/capturing-func.ll | 12
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll | 4
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/lifetime.ll | 48
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll | 18
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll | 20
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll | 16
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll | 18
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll | 12
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/pr29105.ll | 20
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll | 6
-rw-r--r-- llvm/test/Transforms/MemCpyOpt/stack-move.ll | 570
-rw-r--r-- llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll | 4
-rw-r--r-- llvm/test/Transforms/MoveAutoInit/clobber.ll | 20
-rw-r--r-- llvm/test/Transforms/NewGVN/coercion-different-ptr.ll | 6
-rw-r--r-- llvm/test/Transforms/NewGVN/cond_br2-xfail.ll | 8
-rw-r--r-- llvm/test/Transforms/NewGVN/lifetime-simple.ll | 12
-rw-r--r-- llvm/test/Transforms/NewGVN/verify-memoryphi.ll | 6
-rw-r--r-- llvm/test/Transforms/NewGVN/vscale.ll | 8
-rw-r--r-- llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll | 20
-rw-r--r-- llvm/test/Transforms/ObjCARC/post-inlining.ll | 12
-rw-r--r-- llvm/test/Transforms/ObjCARC/related-check.ll | 12
-rw-r--r-- llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll | 20
-rw-r--r-- llvm/test/Transforms/OpenMP/nested_parallelism.ll | 36
-rw-r--r-- llvm/test/Transforms/OpenMP/parallel_deletion.ll | 36
-rw-r--r-- llvm/test/Transforms/OpenMP/parallel_region_merging.ll | 52
-rw-r--r-- llvm/test/Transforms/OpenMP/spmdization.ll | 4
-rw-r--r-- llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll | 4
-rw-r--r-- llvm/test/Transforms/OpenMP/spmdization_indirect.ll | 4
-rw-r--r-- llvm/test/Transforms/OpenMP/spmdization_remarks.ll | 20
-rw-r--r-- llvm/test/Transforms/PGOProfile/consecutive-zeros.ll | 4
-rw-r--r-- llvm/test/Transforms/PGOProfile/entry_alloca.ll | 8
-rw-r--r-- llvm/test/Transforms/PGOProfile/memop_size_annotation.ll | 4
-rw-r--r-- llvm/test/Transforms/PGOProfile/memop_size_opt.ll | 4
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll | 12
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll | 12
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll | 12
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll | 12
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-branch.ll | 12
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll | 20
-rw-r--r-- llvm/test/Transforms/PGOProfile/misexpect-switch.ll | 20
-rw-r--r-- llvm/test/Transforms/PartialInlining/switch_stmt.ll | 4
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll | 20
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll | 12
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll | 52
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll | 12
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll | 72
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll | 40
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll | 12
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll | 4
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll | 16
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll | 20
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/assume-explosion.ll | 4
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll | 12
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll | 20
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll | 36
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll | 8
-rw-r--r-- llvm/test/Transforms/PhaseOrdering/vector-select.ll | 16
-rw-r--r-- llvm/test/Transforms/SROA/alloca-address-space.ll | 2
-rw-r--r-- llvm/test/Transforms/SROA/basictest.ll | 34
-rw-r--r-- llvm/test/Transforms/SROA/dead-inst.ll | 12
-rw-r--r-- llvm/test/Transforms/SROA/ignore-droppable.ll | 12
-rw-r--r-- llvm/test/Transforms/SROA/lifetime-intrinsic.ll | 8
-rw-r--r-- llvm/test/Transforms/SROA/non-capturing-call-readonly.ll | 6
-rw-r--r-- llvm/test/Transforms/SROA/pr26972.ll | 4
-rw-r--r-- llvm/test/Transforms/SROA/readonlynocapture.ll | 32
-rw-r--r-- llvm/test/Transforms/SROA/select-load.ll | 14
-rw-r--r-- llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll | 8
-rw-r--r-- llvm/test/Transforms/SROA/vector-promotion.ll | 2
-rw-r--r-- llvm/test/Transforms/SafeStack/ARM/debug.ll | 8
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/call.ll | 8
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll | 12
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/coloring.ll | 16
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/coloring2.ll | 162
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/debug-loc2.ll | 4
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/layout-frag.ll | 16
-rw-r--r-- llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/entry_counts_cold.ll | 8
-rw-r--r-- llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll | 8
-rw-r--r-- llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/profile-mismatch.ll | 8
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll | 16
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll | 8
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll | 4
-rw-r--r-- llvm/test/Transforms/SampleProfile/remarks.ll | 12
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll | 4
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll | 12
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll | 4
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll | 12
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll | 4
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll | 28
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll | 14
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/lifetime.ll | 8
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll | 20
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll | 8
-rw-r--r-- llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll | 20
-rw-r--r-- llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll | 20
-rw-r--r-- llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll | 20
-rw-r--r-- llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll | 12
-rw-r--r-- llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll | 12
-rw-r--r-- llvm/test/Transforms/Util/PredicateInfo/pr33456.ll | 4
-rw-r--r-- llvm/test/Transforms/Util/dbg-call-bitcast.ll | 12
-rw-r--r-- llvm/test/Verifier/intrinsic-immarg.ll | 20
-rw-r--r-- llvm/test/Verifier/opaque-ptr.ll | 14
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll | 12
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected | 20
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected | 20
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected | 20
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected | 20
-rw-r--r-- llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected | 20
-rw-r--r-- llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll | 8
-rw-r--r-- llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll | 10
449 files changed, 10168 insertions, 7409 deletions
diff --git a/llvm/test/Analysis/BasicAA/modref.ll b/llvm/test/Analysis/BasicAA/modref.ll
index 1aab28f3..4a91fee 100644
--- a/llvm/test/Analysis/BasicAA/modref.ll
+++ b/llvm/test/Analysis/BasicAA/modref.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -aa-pipeline=basic-aa -passes=gvn,dse -S | FileCheck %s
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @external(ptr)
@@ -71,7 +71,7 @@ define void @test3(i8 %X) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 2
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: store i8 2, ptr [[P2]], align 1
; CHECK-NEXT: call void @external(ptr [[P]])
; CHECK-NEXT: ret void
@@ -81,7 +81,7 @@ define void @test3(i8 %X) {
%P2 = getelementptr i8, ptr %P, i32 2
store i8 %Y, ptr %P2 ;; Not read by lifetime.end, should be removed.
- call void @llvm.lifetime.end.p0(i64 1, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
store i8 2, ptr %P2
call void @external(ptr %P)
ret void
@@ -90,7 +90,7 @@ define void @test3(i8 %X) {
define void @test3a(i8 %X) {
; CHECK-LABEL: @test3a(
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: ret void
;
%P = alloca i64
@@ -98,7 +98,7 @@ define void @test3a(i8 %X) {
%P2 = getelementptr i8, ptr %P, i32 2
store i8 %Y, ptr %P2
- call void @llvm.lifetime.end.p0(i64 10, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
ret void
}
diff --git a/llvm/test/Analysis/BasicAA/phi-values-usage.ll b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
index 43df41c..680e1df 100644
--- a/llvm/test/Analysis/BasicAA/phi-values-usage.ll
+++ b/llvm/test/Analysis/BasicAA/phi-values-usage.ll
@@ -14,7 +14,7 @@ target datalayout = "p:8:8-n8"
declare void @otherfn(ptr)
declare i32 @__gxx_personality_v0(...)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
@c = external global ptr, align 1
; This function is one where if we didn't free basicaa after memcpyopt then the
@@ -65,7 +65,7 @@ for.body: ; preds = %for.cond
br label %for.cond
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
%1 = load ptr, ptr %d.0, align 1
store ptr %1, ptr @c, align 1
ret void
diff --git a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
index 1c9d201..b93a2a0 100644
--- a/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
+++ b/llvm/test/Analysis/CallGraph/ignore-assumelike-calls.ll
@@ -29,7 +29,7 @@
define internal void @used_by_lifetime() {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
ret void
}
@@ -55,6 +55,6 @@ define internal void @other_cast_intrinsic_use() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr, i8, i64, i1 immarg)
declare void @llvm.memset.p1.i64(ptr addrspace(1), i8, i64, i1 immarg)
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
index dc95eac..f7ebd40 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-arith-fp.ll
@@ -164,3 +164,55 @@ define void @frem() {
ret void
}
+
+define void @fma() {
+; CHECK-LABEL: 'fma'
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %V4F16 = call <vscale x 4 x half> @llvm.fma.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %V8F16 = call <vscale x 8 x half> @llvm.fma.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %V16F16 = call <vscale x 16 x half> @llvm.fma.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+
+ %V2F32 = call <vscale x 2 x float> @llvm.fma.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ %V4F32 = call <vscale x 4 x float> @llvm.fma.v4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ %V8F32 = call <vscale x 8 x float> @llvm.fma.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+
+ %V2F64 = call <vscale x 2 x double> @llvm.fma.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ %V4F64 = call <vscale x 4 x double> @llvm.fma.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+
+ ret void
+}
+
+define void @fmuladd() {
+; CHECK-LABEL: 'fmuladd'
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found costs of 2 for: %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of 4 for: %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %V4F16 = call <vscale x 4 x half> @llvm.fmuladd.v4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %V8F16 = call <vscale x 8 x half> @llvm.fmuladd.v8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %V16F16 = call <vscale x 16 x half> @llvm.fmuladd.v16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+
+ %V2F32 = call <vscale x 2 x float> @llvm.fmuladd.v2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+ %V4F32 = call <vscale x 4 x float> @llvm.fmuladd.v4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+ %V8F32 = call <vscale x 8 x float> @llvm.fmuladd.v8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+
+ %V2F64 = call <vscale x 2 x double> @llvm.fmuladd.v2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+ %V4F64 = call <vscale x 4 x double> @llvm.fmuladd.v4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
index ecc7fc8..245e8f7 100644
--- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
+++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
@@ -23,7 +23,7 @@
%"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 }
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end(ptr nocapture) #0
; Function Attrs: nounwind ssp uwtable
define hidden void @fun(ptr %N, i1 %arg) #1 align 2 {
@@ -42,7 +42,6 @@ for.cond.cleanup: ; preds = %for.body, %entry
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv190 = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next191, %for.body ]
- call void @llvm.lifetime.end(i64 16, ptr nonnull null)
%indvars.iv.next191 = add nuw nsw i64 %indvars.iv190, 1
%exitcond193 = icmp eq i64 %indvars.iv.next191, %wide.trip.count192
br i1 %exitcond193, label %for.cond.cleanup, label %for.body
diff --git a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
index 3a54428..cef960d 100644
--- a/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/X86/free-intrinsics.ll
@@ -14,8 +14,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -32,8 +32,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -49,8 +49,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -66,8 +66,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
index 96064dc..2acc8e8 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-datalayout.ll
@@ -16,8 +16,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -36,8 +36,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -55,8 +55,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
%a7 = call i1 @llvm.allow.ubsan.check(i8 123)
@@ -74,8 +74,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
index f989ebe..7f002d0 100644
--- a/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
+++ b/llvm/test/Analysis/CostModel/free-intrinsics-no_info.ll
@@ -14,8 +14,8 @@ define i32 @trivially_free() {
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -34,8 +34,8 @@ define i32 @trivially_free() {
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a4 = call i1 @llvm.is.constant.i32(i32 undef)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
-; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.start.p0(ptr %alloca)
+; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.lifetime.end.p0(ptr %alloca)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 true, i1 true, i1 true)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a6 = call ptr @llvm.ptr.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
; CHECK-THROUGHPUT-NEXT: Cost Model: Found an estimated cost of 0 for instruction: call void @llvm.var.annotation.p0.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -53,8 +53,8 @@ define i32 @trivially_free() {
%a2 = call ptr @llvm.launder.invariant.group.p0(ptr undef)
%a3 = call ptr @llvm.strip.invariant.group.p0(ptr undef)
%a4 = call i1 @llvm.is.constant.i32(i32 undef)
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
%a5 = call i64 @llvm.objectsize.i64.p0(ptr undef, i1 1, i1 1, i1 1)
%a6 = call ptr @llvm.ptr.annotation.p0(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
call void @llvm.var.annotation(ptr undef, ptr undef, ptr undef, i32 undef, ptr undef)
@@ -72,8 +72,8 @@ declare void @llvm.invariant.end.p0(ptr, i64, ptr)
declare ptr @llvm.launder.invariant.group.p0(ptr)
declare ptr @llvm.strip.invariant.group.p0(ptr)
declare i1 @llvm.is.constant.i32(i32)
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
declare void @llvm.var.annotation(ptr, ptr, ptr, i32, ptr)
diff --git a/llvm/test/Analysis/DDG/basic-loopnest.ll b/llvm/test/Analysis/DDG/basic-loopnest.ll
index 325428c..75efff5 100644
--- a/llvm/test/Analysis/DDG/basic-loopnest.ll
+++ b/llvm/test/Analysis/DDG/basic-loopnest.ll
@@ -1,5 +1,8 @@
; RUN: opt < %s -disable-output "-passes=print<ddg>" 2>&1 | FileCheck %s
+; XFAIL: *
+; At the moment, DependenceAnalysis cannot infer `n` to be positive.
+
; CHECK-LABEL: 'DDG' for loop 'test1.for.cond1.preheader':
@@ -378,4 +381,4 @@ for.inc12: ; preds = %for.body4, %test2.f
for.end14: ; preds = %for.inc12, %entry
ret void
-}
\ No newline at end of file
+}
diff --git a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
index 06bfc5d..1d45134 100644
--- a/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/Coupled.ll
@@ -719,12 +719,14 @@ for.end: ; preds = %for.body
;; for(int j = 0; j < M; j+=1)
;; A[M*N + M*i + j] = 2;
; FIXME: Currently failing to infer that %M is positive.
+
define void @couple_weakzerosiv(ptr noalias nocapture %A, i64 %N, i64 %M) {
; CHECK-LABEL: 'couple_weakzerosiv'
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx.us, align 4 --> Dst: store i32 1, ptr %arrayidx.us, align 4
; CHECK-NEXT: da analyze - none!
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx.us, align 4 --> Dst: store i32 2, ptr %arrayidx9.us, align 4
-; CHECK-NEXT: da analyze - output [p>]!
+; CHECK-NEXT: da analyze - output [*|<]!
; CHECK-NEXT: Src: store i32 2, ptr %arrayidx9.us, align 4 --> Dst: store i32 2, ptr %arrayidx9.us, align 4
; CHECK-NEXT: da analyze - none!
;
diff --git a/llvm/test/Analysis/DependenceAnalysis/DADelin.ll b/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
index b2e4959..8f94a45 100644
--- a/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/DADelin.ll
@@ -594,14 +594,15 @@ for.end12: ; preds = %for.inc10, %entry
}
+; FIXME? It seems that we cannot prove that %N is non-negative...
define void @nonnegative(ptr nocapture %A, i32 %N) {
; CHECK-LABEL: 'nonnegative'
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx, align 4 --> Dst: store i32 1, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - output [* *]!
; CHECK-NEXT: Src: store i32 1, ptr %arrayidx, align 4 --> Dst: store i32 2, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - consistent output [0 0|<]!
+; CHECK-NEXT: da analyze - output [* *|<]!
; CHECK-NEXT: Src: store i32 2, ptr %arrayidx, align 4 --> Dst: store i32 2, ptr %arrayidx, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - output [* *]!
;
entry:
%cmp44 = icmp eq i32 %N, 0
@@ -630,3 +631,81 @@ for.latch:
exit:
ret void
}
+
+; i = 0;
+; do {
+; a[k * i] = 42;
+; a[k * (i + 1)] = 42;
+; i++;
+; } while (i != k);
+;
+; The dependence direction between the two stores depends on the sign of k.
+; Note that the loop guard is omitted intentionally.
+; FIXME: If k is zero, each store has a loop-carried dependence on itself.
+;
+define void @coeff_may_negative(ptr %a, i32 %k) {
+; CHECK-LABEL: 'coeff_may_negative'
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.0, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - output [*|<]!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.1, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - none!
+;
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add i32 %i, 1
+ %subscript.0 = mul i32 %i, %k
+ %subscript.1 = mul i32 %i.next, %k
+ %idx.0 = getelementptr i8, ptr %a, i32 %subscript.0
+ %idx.1 = getelementptr i8, ptr %a, i32 %subscript.1
+ store i8 42, ptr %idx.0
+ store i8 42, ptr %idx.1
+ %cond.exit = icmp eq i32 %i.next, %k
+ br i1 %cond.exit, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; i = 0;
+; do {
+; a[k * i] = 42;
+; a[k * (i + 1)] = 42;
+; i++;
+; } while (i != k);
+;
+; Note that the loop guard is omitted intentionally.
+; FIXME: In principle, we can infer that the value of k is non-negative from
+; the nsw flag.
+;
+define void @coeff_positive(ptr %a, i32 %k) {
+; CHECK-LABEL: 'coeff_positive'
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.0, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.0, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - output [*|<]!
+; CHECK-NEXT: Src: store i8 42, ptr %idx.1, align 1 --> Dst: store i8 42, ptr %idx.1, align 1
+; CHECK-NEXT: da analyze - none!
+;
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %i.next = add nsw i32 %i, 1
+ %subscript.0 = mul i32 %i, %k
+ %subscript.1 = mul i32 %i.next, %k
+ %idx.0 = getelementptr i8, ptr %a, i32 %subscript.0
+ %idx.1 = getelementptr i8, ptr %a, i32 %subscript.1
+ store i8 42, ptr %idx.0
+ store i8 42, ptr %idx.1
+ %cond.exit = icmp eq i32 %i.next, %k
+ br i1 %cond.exit, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
index bd46741..da5a898 100644
--- a/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
+++ b/llvm/test/Analysis/KernelInfo/openmp/nvptx.ll
@@ -417,7 +417,7 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n
br label %116
116: ; preds = %110, %128
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %3) #20
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %3) #20
tail call void @llvm.nvvm.barrier.sync(i32 noundef 8)
%117 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %3) #20
%118 = load ptr, ptr %3, align 8, !tbaa !93
@@ -446,11 +446,11 @@ define internal noundef range(i32 -1, 1024) i32 @__kmpc_target_init(ptr nofree n
128: ; preds = %126, %120
tail call void @llvm.nvvm.barrier.sync(i32 noundef 8)
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20
br label %116, !llvm.loop !94
129: ; preds = %116
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %3) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %3) #20
br label %130
130: ; preds = %106, %129, %100, %98
@@ -495,7 +495,7 @@ define internal fastcc void @__assert_fail_internal(ptr noundef nonnull derefere
declare void @llvm.assume(i1 noundef) #9
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #10
+declare void @llvm.lifetime.start.p0(ptr nocapture) #10
; Function Attrs: convergent nocallback nounwind
declare void @llvm.nvvm.barrier.sync(i32) #11
@@ -587,7 +587,7 @@ define internal void @__kmpc_kernel_end_parallel() local_unnamed_addr #13 {
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #10
+declare void @llvm.lifetime.end.p0(ptr nocapture) #10
; Function Attrs: convergent mustprogress nounwind willreturn allockind("free") memory(argmem: readwrite, inaccessiblemem: readwrite)
declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_addr #14
@@ -595,11 +595,11 @@ declare extern_weak void @free(ptr allocptr nocapture noundef) local_unnamed_add
; Function Attrs: convergent mustprogress nounwind
define internal noundef i32 @_ZN4ompx6printfEPKcz(ptr noundef %0, ...) local_unnamed_addr #15 {
%2 = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 %2) #29
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 %2) #29
call void @llvm.va_start.p0(ptr noundef nonnull align 8 %2) #27
%3 = load ptr, ptr %2, align 8, !tbaa !101
%4 = call i32 @vprintf(ptr noundef %0, ptr noundef %3) #24
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %2) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %2) #20
ret i32 %4
}
@@ -641,7 +641,7 @@ define internal void @__kmpc_target_deinit() #4 {
br i1 %14, label %15, label %27
15: ; preds = %11
- call void @llvm.lifetime.start.p0(i64 noundef 8, ptr noundef nonnull align 8 dereferenceable(8) %1) #29
+ call void @llvm.lifetime.start.p0(ptr noundef nonnull align 8 dereferenceable(8) %1) #29
%16 = call zeroext i1 @__kmpc_kernel_parallel(ptr noalias nocapture nofree noundef nonnull writeonly align 8 dereferenceable(8) %1) #20
%17 = load i32, ptr @__omp_rtl_debug_kind, align 4, !tbaa !62
%18 = load i32, ptr addrspace(4) @__omp_rtl_device_environment, align 8, !tbaa !83
@@ -659,7 +659,7 @@ define internal void @__kmpc_target_deinit() #4 {
26: ; preds = %15
tail call void @llvm.assume(i1 noundef %23) #23
- call void @llvm.lifetime.end.p0(i64 noundef 8, ptr noundef nonnull %1) #20
+ call void @llvm.lifetime.end.p0(ptr noundef nonnull %1) #20
br label %27
27: ; preds = %26, %11, %10, %0
diff --git a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
index 71ea5d2..0ad1a33 100644
--- a/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
+++ b/llvm/test/Analysis/LazyValueAnalysis/invalidation.ll
@@ -17,13 +17,13 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @hoo(ptr)
declare i32 @printf(ptr nocapture readonly, ...)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @goo(i32 %N, ptr %b) {
entry:
@@ -38,12 +38,12 @@ for.cond: ; preds = %for.body, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
call void @hoo(ptr %a.i)
call void @hoo(ptr %c)
%tmp1 = load volatile i64, ptr %a.i, align 8
%call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
index 18d2459..03b6768 100644
--- a/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
+++ b/llvm/test/Analysis/MemorySSA/lifetime-simple.ll
@@ -9,8 +9,8 @@ entry:
%P = alloca [32 x i8]
%Q = call ptr @obscure(ptr %P)
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %P)
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
; CHECK: MemoryUse(1)
; CHECK-NEXT: %0 = load i8, ptr %P
%0 = load i8, ptr %P
@@ -18,8 +18,8 @@ entry:
; CHECK-NEXT: store i8 1, ptr %P
store i8 1, ptr %P
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %P)
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
; CHECK: MemoryUse(3)
; CHECK-NEXT: %1 = load i8, ptr %P
%1 = load i8, ptr %P
@@ -28,5 +28,5 @@ entry:
%2 = load i8, ptr %Q
ret i8 %1
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
index b824481..22bbead 100644
--- a/llvm/test/Analysis/MemorySSA/phi-translation.ll
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -465,7 +465,7 @@ end: ; preds = %for.body
define void @use_clobbered_by_def_in_loop() {
entry:
%nodeStack = alloca [12 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %nodeStack)
+ call void @llvm.lifetime.start.p0(ptr nonnull %nodeStack)
br i1 false, label %cleanup, label %while.cond
; CHECK-LABEL: while.cond:
@@ -502,12 +502,12 @@ while.end: ; preds = %while.cond, %land.r
br i1 true, label %cleanup, label %while.cond.backedge
cleanup: ; preds = %while.body, %while.end, %entry
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack)
+ call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @another_loop_clobber_inc() {
; CHECK-LABEL: void @another_loop_clobber_inc
diff --git a/llvm/test/Analysis/MemorySSA/pr43044.ll b/llvm/test/Analysis/MemorySSA/pr43044.ll
index bd767d3..7ae02f3 100644
--- a/llvm/test/Analysis/MemorySSA/pr43044.ll
+++ b/llvm/test/Analysis/MemorySSA/pr43044.ll
@@ -4,7 +4,7 @@
target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64"
target triple = "s390x-ibm-linux"
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; CHECK-LABEL: @func_42()
define void @func_42() {
diff --git a/llvm/test/Analysis/MemorySSA/pr49859.ll b/llvm/test/Analysis/MemorySSA/pr49859.ll
index 25ef586..0e97f57 100644
--- a/llvm/test/Analysis/MemorySSA/pr49859.ll
+++ b/llvm/test/Analysis/MemorySSA/pr49859.ll
@@ -11,12 +11,12 @@ entry:
%n = alloca i8, align 1
%i = alloca i8, align 1
%cleanup.dest.slot = alloca i32, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i8 0, ptr %sum, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %n) #3
+ call void @llvm.lifetime.start.p0(ptr %n) #3
%call = call i8 @idi(i8 10)
store i8 %call, ptr %n, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i8 0, ptr %i, align 1
br label %for.cond
@@ -61,9 +61,9 @@ for.inc: ; preds = %if.end
; CHECK: final.cleanup:
; CHECK-NEXT: ; [[NO20:.*]] = MemoryPhi({if.then,[[NO9:.*]]},{for.cond.cleanup,[[NO8:.*]]})
; CHECK-NEXT: ; [[NO12:.*]] = MemoryDef([[NO20]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %i)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %i)
final.cleanup: ; preds = %if.then, %for.cond.cleanup
- call void @llvm.lifetime.end.p0(i64 1, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
; CHECK: for.end:
@@ -71,23 +71,23 @@ final.cleanup: ; preds = %if.then, %for
; CHECK-NEXT: %3 = load i8, ptr %sum, align 1
for.end: ; preds = %final.cleanup
%8 = load i8, ptr %sum, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %res.addr.i)
+ call void @llvm.lifetime.start.p0(ptr %res.addr.i)
store i8 %8, ptr %res.addr.i, align 1
%9 = load i8, ptr %res.addr.i, align 1
call void @foo(i8 %9) #3
- call void @llvm.lifetime.end.p0(i64 1, ptr %res.addr.i)
- call void @llvm.lifetime.end.p0(i64 1, ptr %n) #3
- call void @llvm.lifetime.end.p0(i64 1, ptr %sum) #3
+ call void @llvm.lifetime.end.p0(ptr %res.addr.i)
+ call void @llvm.lifetime.end.p0(ptr %n) #3
+ call void @llvm.lifetime.end.p0(ptr %sum) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i8 @idi(i8)
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @foo(i8)
diff --git a/llvm/test/Analysis/MemorySSA/renamephis.ll b/llvm/test/Analysis/MemorySSA/renamephis.ll
index e297b99..a731ef1 100644
--- a/llvm/test/Analysis/MemorySSA/renamephis.ll
+++ b/llvm/test/Analysis/MemorySSA/renamephis.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @g()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; CHECK-LABEL: @f
define void @f(i1 %arg) align 2 {
diff --git a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
index 39b475d..7120eec 100644
--- a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
+++ b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll
@@ -50,7 +50,7 @@ define i32 @d(i32 %base) {
;
entry:
%e = alloca [1 x [1 x i8]], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %e) #2
+ call void @llvm.lifetime.start.p0(ptr %e) #2
br label %for.cond
for.cond: ; preds = %for.cond, %entry
@@ -69,4 +69,4 @@ for.cond: ; preds = %for.cond, %entry
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
diff --git a/llvm/test/Analysis/ScalarEvolution/sdiv.ll b/llvm/test/Analysis/ScalarEvolution/sdiv.ll
index 9eaaf8b..acc6ab0 100644
--- a/llvm/test/Analysis/ScalarEvolution/sdiv.ll
+++ b/llvm/test/Analysis/ScalarEvolution/sdiv.ll
@@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 {
entry:
%storage = alloca [2 x i32], align 4
%0 = bitcast ptr %storage to ptr
- call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.start.p0(ptr %storage) #4
call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false)
br label %for.cond
@@ -48,7 +48,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.end.p0(ptr %storage) #4
ret void
for.body:
@@ -64,10 +64,10 @@ for.body:
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Analysis/ScalarEvolution/srem.ll b/llvm/test/Analysis/ScalarEvolution/srem.ll
index 377e58a..9d4538f 100644
--- a/llvm/test/Analysis/ScalarEvolution/srem.ll
+++ b/llvm/test/Analysis/ScalarEvolution/srem.ll
@@ -38,7 +38,7 @@ define dso_local void @_Z4loopi(i32 %width) local_unnamed_addr #0 {
entry:
%storage = alloca [2 x i32], align 4
%0 = bitcast ptr %storage to ptr
- call void @llvm.lifetime.start.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.start.p0(ptr %storage) #4
call void @llvm.memset.p0.i64(ptr align 4 %0, i8 0, i64 8, i1 false)
br label %for.cond
@@ -48,7 +48,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %storage) #4
+ call void @llvm.lifetime.end.p0(ptr %storage) #4
ret void
for.body:
@@ -64,10 +64,10 @@ for.body:
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
declare dso_local i32 @_Z3adji(i32) local_unnamed_addr #3
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
index 840a517..36d79f9 100644
--- a/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
+++ b/llvm/test/Analysis/ScopedNoAliasAA/alias-scope-merging.ll
@@ -8,10 +8,10 @@ define i8 @test(i8 %input) {
%dst = alloca i8
%src = alloca i8
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope ![[SCOPE:[0-9]+]]
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !4
+ call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !4
store i8 %input, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !4
+ call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !4
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !4
%ret_value = load i8, ptr %dst
call void @use(ptr %src)
@@ -23,8 +23,8 @@ define i8 @test(i8 %input) {
; CHECK-DAG: ![[CALLEE0_B:[0-9]+]] = distinct !{!{{[0-9]+}}, !{{[0-9]+}}, !"callee0: %b"}
; CHECK-DAG: ![[SCOPE]] = !{![[CALLEE0_A]], ![[CALLEE0_B]]}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
!0 = !{!1, !7}
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
index 6c3dec9..51bfa15 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/lifetime.ll
@@ -11,31 +11,31 @@ entry:
; CHECK: %y = alloca i32, align 4
; CHECK-NEXT: Alive: <>
%z = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x z>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y z>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <z>
call void @capture32(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -48,13 +48,13 @@ entry:
; CHECK-NEXT: Alive: <y>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -69,31 +69,31 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
%z = alloca i64, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <z>
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -111,31 +111,31 @@ entry:
; CHECK-NEXT: Alive: <>
%z = alloca i64, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <x y z>
call void @capture32(ptr %x)
call void @capture32(ptr %y)
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <y z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <>
ret void
@@ -154,12 +154,12 @@ entry:
%z = alloca i64, align 8
%z1 = alloca i64, align 8
%z2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x1)
; CHECK-NEXT: Alive: <x1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK-NEXT: Alive: <x1 x2>
call void @capture64(ptr nonnull %x1)
@@ -171,8 +171,8 @@ entry:
if.then: ; preds = %entry
; CHECK: if.then:
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x1 x2 y>
call void @capture64(ptr nonnull %y)
@@ -181,13 +181,13 @@ if.then: ; preds = %entry
if.then3: ; preds = %if.then
; CHECK: if.then3:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.start.p0(ptr %y1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y1)
; CHECK-NEXT: Alive: <x1 x2 y y1>
call void @capture64(ptr nonnull %y1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.end.p0(ptr %y1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y1)
; CHECK-NEXT: Alive: <x1 x2 y>
br label %if.end
@@ -195,13 +195,13 @@ if.then3: ; preds = %if.then
if.else: ; preds = %if.then
; CHECK: if.else:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.start.p0(ptr %y2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y2)
; CHECK-NEXT: Alive: <x1 x2 y y2>
call void @capture64(ptr nonnull %y2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.end.p0(ptr %y2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y2)
; CHECK-NEXT: Alive: <x1 x2 y>
br label %if.end
@@ -209,8 +209,8 @@ if.else: ; preds = %if.then
if.end: ; preds = %if.else, %if.then3
; CHECK: if.end:
; CHECK-NEXT: Alive: <x1 x2 y>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x1 x2>
br label %if.end9
@@ -222,8 +222,8 @@ if.else4: ; preds = %entry
; CHECK: %z.cast = bitcast ptr %z to ptr
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z)
; CHECK-NEXT: Alive: <x1 x2 z>
call void @capture64(ptr nonnull %z)
@@ -232,13 +232,13 @@ if.else4: ; preds = %entry
if.then6: ; preds = %if.else4
; CHECK: if.then6:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.start.p0(ptr %z1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z1)
; CHECK-NEXT: Alive: <x1 x2 z z1>
call void @capture64(ptr nonnull %z1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.end.p0(ptr %z1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z1)
; CHECK-NEXT: Alive: <x1 x2 z>
br label %if.end8
@@ -246,13 +246,13 @@ if.then6: ; preds = %if.else4
if.else7: ; preds = %if.else4
; CHECK: if.else7:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.start.p0(ptr %z2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %z2)
; CHECK-NEXT: Alive: <x1 x2 z z2>
call void @capture64(ptr nonnull %z2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.end.p0(ptr %z2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z2)
; CHECK-NEXT: Alive: <x1 x2 z>
br label %if.end8
@@ -260,8 +260,8 @@ if.else7: ; preds = %if.else4
if.end8: ; preds = %if.else7, %if.then6
; CHECK: if.end8:
; CHECK-NEXT: Alive: <x1 x2 z>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %z)
; CHECK-NEXT: Alive: <x1 x2>
br label %if.end9
@@ -269,12 +269,12 @@ if.end8: ; preds = %if.else7, %if.then6
if.end9: ; preds = %if.end8, %if.end
; CHECK: if.end9:
; CHECK-NEXT: Alive: <x1 x2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x2)
; CHECK-NEXT: Alive: <x1>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x1)
; CHECK-NEXT: Alive: <>
ret void
@@ -287,8 +287,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
@@ -297,17 +297,17 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -315,8 +315,8 @@ bb2: ; preds = %entry
bb3: ; preds = %entry
; CHECK: bb3:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -329,13 +329,13 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br i1 %d, label %bb2, label %bb3
@@ -343,13 +343,13 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <>
ret void
@@ -367,13 +367,13 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br i1 %d, label %bb2, label %bb3
@@ -381,8 +381,8 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -401,8 +401,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture32(ptr %x)
@@ -411,12 +411,12 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
call void @capture32(ptr %y)
@@ -436,8 +436,8 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %d, label %bb2, label %bb3
@@ -445,8 +445,8 @@ entry:
bb2: ; preds = %entry
; CHECK: bb2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture32(ptr %y)
@@ -467,12 +467,12 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i B.i>
call void @capture100x32(ptr %A.i)
@@ -480,30 +480,30 @@ entry:
; CHECK-NEXT: Alive: <A.i B.i>
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i)
; CHECK-NEXT: Alive: <B.i>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
+ call void @llvm.lifetime.start.p0(ptr %A.i1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i1)
; CHECK-NEXT: Alive: <A.i1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.start.p0(ptr %B.i2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i2)
; CHECK-NEXT: Alive: <A.i1 B.i2>
call void @capture100x32(ptr %A.i1)
call void @capture100x32(ptr %B.i2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
+ call void @llvm.lifetime.end.p0(ptr %A.i1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i1)
; CHECK-NEXT: Alive: <B.i2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %B.i2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i2)
; CHECK-NEXT: Alive: <>
ret void
@@ -516,20 +516,20 @@ entry:
; CHECK-NEXT: Alive: <>
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1)
; CHECK-NEXT: Alive: <buf1>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.end.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %buf1)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf1)
; CHECK-NEXT: Alive: <buf1>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
+ call void @llvm.lifetime.start.p0(ptr %buf2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %buf2)
; CHECK-NEXT: Alive: <buf1 buf2>
call void @capture8(ptr %buf1)
@@ -546,22 +546,22 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i A.i1 B.i2>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i A.i1 B.i B.i2>
call void @capture100x32(ptr %A.i)
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %A.i)
; CHECK-NEXT: Alive: <A.i1 B.i B.i2>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %B.i)
; CHECK-NEXT: Alive: <A.i1 B.i2>
br label %block2
@@ -583,23 +583,23 @@ entry:
; CHECK-NEXT: Alive: <>
%a.i = alloca [4 x %struct.Klass], align 16
%b.i = alloca [4 x %struct.Klass], align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %a.i)
; CHECK-NEXT: Alive: <a.i>
- call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.start.p0(ptr %b.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %b.i)
; CHECK-NEXT: Alive: <a.i b.i>
call void @capture8(ptr %a.i)
call void @capture8(ptr %b.i)
%z3 = load i32, ptr %a.i, align 16
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %a.i)
; CHECK-NEXT: Alive: <b.i>
- call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.end.p0(ptr %b.i)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %b.i)
; CHECK-NEXT: Alive: <>
ret i32 %z3
@@ -611,8 +611,8 @@ entry:
; CHECK: entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br label %l2
@@ -622,8 +622,8 @@ l2: ; preds = %l2, %entry
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
call void @capture8(ptr %x)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br label %l2
@@ -636,8 +636,8 @@ entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br label %l2
@@ -645,17 +645,17 @@ entry:
l2: ; preds = %l2, %entry
; CHECK: l2:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
call void @capture8(ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
call void @capture8(ptr %x)
@@ -677,24 +677,24 @@ entry:
if.then: ; preds = %entry
; CHECK: if.then:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
-; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
+; CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %a)
; CHECK-NEXT: Alive: <a>
tail call void @capture8(ptr %a)
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
-; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %a)
; CHECK-NEXT: Alive: <>
br label %if.end
if.else: ; preds = %entry
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
-; CHECK: call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
+; CHECK: call void @llvm.lifetime.start.p0(ptr nonnull %b)
; CHECK-NEXT: Alive: <b>
tail call void @capture8(ptr %b)
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
-; CHECK: call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %b)
; CHECK-NEXT: Alive: <>
br label %if.end
@@ -719,8 +719,8 @@ entry:
if.then:
; CHECK: if.then:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
br label %if.end
@@ -730,12 +730,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %if.end
@@ -758,12 +758,12 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %end
@@ -773,7 +773,7 @@ entry:
dead:
; CHECK: dead:
; CHECK-NOT: Alive:
- call void @llvm.lifetime.start.p0(i64 4, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %end
; CHECK: br label %end
@@ -792,20 +792,20 @@ entry:
; CHECK: entry:
; CHECK-NEXT: Alive: <>
%x = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
ret void
@@ -827,8 +827,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <>
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
@@ -840,12 +840,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <y>
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x y>
br label %if.then
@@ -868,8 +868,8 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %a, label %if.then, label %if.else
@@ -880,8 +880,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x>
; MUST-NEXT: Alive: <>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <y>
@@ -893,12 +893,12 @@ if.then:
if.else:
; CHECK: if.else:
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %y)
; CHECK-NEXT: Alive: <x>
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: Alive: <>
br label %if.then
@@ -921,8 +921,8 @@ entry:
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %x)
; CHECK-NEXT: Alive: <x>
br i1 %a, label %if.then, label %if.end
@@ -933,8 +933,8 @@ if.then:
; CHECK: if.then:
; MAY-NEXT: Alive: <x y>
; MUST-NEXT: Alive: <x>
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %y)
; CHECK-NEXT: Alive: <x y>
br i1 %a, label %if.then, label %if.end
@@ -949,8 +949,8 @@ if.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr captures(none))
-declare void @llvm.lifetime.end.p0(i64, ptr captures(none))
+declare void @llvm.lifetime.start.p0(ptr captures(none))
+declare void @llvm.lifetime.end.p0(ptr captures(none))
declare void @capture8(ptr)
declare void @capture32(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/local.ll b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
index 02d46c8..6944f38 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/local.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
@@ -707,9 +707,9 @@ entry:
%n = load i8, ptr %y
call void @llvm.memset.p0.i32(ptr nonnull %z, i8 0, i32 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
ret void
}
@@ -731,9 +731,9 @@ entry:
%y = alloca i8, align 4
%z = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
store i8 5, ptr %x
%n = load i8, ptr %y
@@ -756,13 +756,13 @@ entry:
%y = alloca i8, align 4
%z = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x)
- call void @llvm.lifetime.end.p0(i64 1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %z)
store i8 5, ptr %x
%n = load i8, ptr %y
@@ -973,13 +973,13 @@ define void @DoubleLifetime() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 true)
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -993,13 +993,13 @@ define void @DoubleLifetime2() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
%n = load i32, ptr %a
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -1013,13 +1013,13 @@ define void @DoubleLifetime3() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
store i32 5, ptr %a
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -1033,9 +1033,9 @@ define void @DoubleLifetime4() {
; CHECK-EMPTY:
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i32(ptr %a, i8 1, i32 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @unknown_call(ptr %a)
ret void
}
@@ -1136,5 +1136,5 @@ entry:
ret ptr null
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Assembler/auto_upgrade_intrinsics.ll b/llvm/test/Assembler/auto_upgrade_intrinsics.ll
index d1b535b..37cb496 100644
--- a/llvm/test/Assembler/auto_upgrade_intrinsics.ll
+++ b/llvm/test/Assembler/auto_upgrade_intrinsics.ll
@@ -171,10 +171,10 @@ define void @tests.lifetime.start.end() {
; CHECK-LABEL: @tests.lifetime.start.end(
%a = alloca i8
call void @llvm.lifetime.start(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.start.p0(ptr %a)
store i8 0, ptr %a
call void @llvm.lifetime.end(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -185,10 +185,10 @@ define void @tests.lifetime.start.end.unnamed() {
; CHECK-LABEL: @tests.lifetime.start.end.unnamed(
%a = alloca ptr
call void @llvm.lifetime.start.unnamed(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.start.p0(ptr %a)
store ptr null, ptr %a
call void @llvm.lifetime.end.unnamed(i64 1, ptr %a)
- ; CHECK: call void @llvm.lifetime.end.p0(i64 1, ptr %a)
+ ; CHECK: call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -220,5 +220,5 @@ define void @test.prefetch.unnamed(ptr %ptr) {
; emitted at the end.
; CHECK: declare i32 @llvm.objectsize.i32.p0
-; CHECK: declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none))
-; CHECK: declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none))
+; CHECK: declare void @llvm.lifetime.start.p0(ptr captures(none))
+; CHECK: declare void @llvm.lifetime.end.p0(ptr captures(none))
diff --git a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
index 00ab934..377c002 100644
--- a/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
+++ b/llvm/test/Assembler/autoupgrade-lifetime-intrinsics.ll
@@ -5,8 +5,8 @@ define void @strip_bitcast() {
; CHECK-LABEL: define void @strip_bitcast() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = bitcast ptr [[A]] to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
@@ -20,8 +20,8 @@ define void @strip_addrspacecast() {
; CHECK-LABEL: define void @strip_addrspacecast() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = addrspacecast ptr [[A]] to ptr addrspace(1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
@@ -35,8 +35,8 @@ define void @strip_gep() {
; CHECK-LABEL: define void @strip_gep() {
; CHECK-NEXT: [[A:%.*]] = alloca [2 x i8], align 1
; CHECK-NEXT: [[B:%.*]] = getelementptr [2 x i8], ptr [[A]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [2 x i8]
@@ -55,3 +55,8 @@ define void @remove_unanalyzable(ptr %p) {
call void @llvm.lifetime.end.p0(i64 1, ptr %p)
ret void
}
+
+declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1))
+declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1))
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
index 437a9e6..3f14162 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/vararg.mir
@@ -10,7 +10,7 @@
define i32 @va_start(ptr %a, ...) {
entry:
%ap = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %ap)
+ call void @llvm.lifetime.start.p0(ptr %ap)
call void @llvm.va_start.p0(ptr %ap)
%vr_offs_p = getelementptr inbounds i8, ptr %ap, i64 28
%vr_offs = load i32, ptr %vr_offs_p, align 4
diff --git a/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll
new file mode 100644
index 0000000..da04c67
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-histcnt-dag-combine-hang.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve2 -verify-machineinstrs < %s -o - | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; This test is reduced from a real-world example that would cause the DAGCombiner to hang.
+
+define void @histcnt_loop(ptr %0, i64 %1, ptr %2, i64 %3, i64 %4) {
+; CHECK-LABEL: histcnt_loop:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov z0.d, #1 // =0x1
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, x1
+; CHECK-NEXT: .LBB0_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT: lsl x10, x8, #1
+; CHECK-NEXT: add x11, x0, x10
+; CHECK-NEXT: add x10, x9, x10
+; CHECK-NEXT: lsl z1.d, z1.d, #1
+; CHECK-NEXT: ld1h { z4.d }, p0/z, [x11, #1, mul vl]
+; CHECK-NEXT: ld1h { z5.d }, p0/z, [x10, #1, mul vl]
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x9, x8, lsl #1]
+; CHECK-NEXT: add x8, x8, x3
+; CHECK-NEXT: cmp x4, x8
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z4.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z4.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z4.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z3.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: lsl z1.d, z5.d, #1
+; CHECK-NEXT: histcnt z2.d, p0/z, z1.d, z1.d
+; CHECK-NEXT: ld1h { z3.d }, p0/z, [x2, z1.d]
+; CHECK-NEXT: mad z2.d, p0/m, z0.d, z3.d
+; CHECK-NEXT: st1h { z2.d }, p0, [x2, z1.d]
+; CHECK-NEXT: b.ne .LBB0_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %6 = phi i64 [ 0, %entry ], [ %15, %loop ]
+ %7 = getelementptr inbounds nuw i16, ptr %0, i64 %6
+ %8 = getelementptr inbounds nuw i8, ptr %7, i64 %1
+ %9 = load <vscale x 4 x i16>, ptr %7, align 2
+ %10 = load <vscale x 4 x i16>, ptr %8, align 2
+ %11 = zext <vscale x 4 x i16> %9 to <vscale x 4 x i64>
+ %12 = zext <vscale x 4 x i16> %10 to <vscale x 4 x i64>
+ %13 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %11
+ %14 = getelementptr inbounds nuw [16 x i16], ptr %2, i64 0, <vscale x 4 x i64> %12
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %13, i16 1, <vscale x 4 x i1> splat (i1 true))
+ call void @llvm.experimental.vector.histogram.add.nxv4p0.i16(<vscale x 4 x ptr> %14, i16 1, <vscale x 4 x i1> splat (i1 true))
+ %15 = add nuw i64 %6, %3
+ %16 = icmp eq i64 %15, %4
+ br i1 %16, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index 256ff94..9a1b6a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -70,6 +70,23 @@ entry:
ret <2 x i64> %add.i
}
+define void @test_commutable_vaddl_s8(<8 x i8> %a, <8 x i8> %b, ptr %c) {
+; CHECK-LABEL: test_commutable_vaddl_s8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: saddl v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ store <8 x i16> %add.i, ptr %c
+ %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i
+ %c.gep.1 = getelementptr i8, ptr %c, i64 16
+ store <8 x i16> %add.i2, ptr %c.gep.1
+ ret void
+}
+
define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: test_vaddl_u8:
; CHECK: // %bb.0: // %entry
@@ -106,6 +123,23 @@ entry:
ret <2 x i64> %add.i
}
+define void @test_commutable_vaddl_u8(<8 x i8> %a, <8 x i8> %b, ptr %c) {
+; CHECK-LABEL: test_commutable_vaddl_u8:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uaddl v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ store <8 x i16> %add.i, ptr %c
+ %add.i2 = add <8 x i16> %vmovl.i2.i, %vmovl.i.i
+ %c.gep.1 = getelementptr i8, ptr %c, i64 16
+ store <8 x i16> %add.i2, ptr %c.gep.1
+ ret void
+}
+
define <8 x i16> @test_vaddl_a8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-LABEL: test_vaddl_a8:
; CHECK-SD: // %bb.0: // %entry
@@ -2892,9 +2926,9 @@ define <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coer
; CHECK-GI-LABEL: cmplx_mul_combined_re_im:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: lsr x9, x0, #16
-; CHECK-GI-NEXT: adrp x8, .LCPI196_0
+; CHECK-GI-NEXT: adrp x8, .LCPI198_0
; CHECK-GI-NEXT: rev32 v4.8h, v0.8h
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI196_0]
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI198_0]
; CHECK-GI-NEXT: fmov d1, x9
; CHECK-GI-NEXT: dup v2.8h, v1.h[0]
; CHECK-GI-NEXT: sqneg v1.8h, v2.8h
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
index ecf3f69..0d427c0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-mul-div.ll
@@ -1608,6 +1608,18 @@ define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
ret <16 x i8> %prod
}
+define <16 x i8> @commutable_poly_mul(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK-LABEL: commutable_poly_mul:
+; CHECK: // %bb.0:
+; CHECK-NEXT: pmul v0.16b, v0.16b, v1.16b
+; CHECK-NEXT: add v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %1 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %2 = call <16 x i8> @llvm.aarch64.neon.pmul.v16i8(<16 x i8> %rhs, <16 x i8> %lhs)
+ %3 = add <16 x i8> %1, %2
+ ret <16 x i8> %3
+}
+
declare <4 x i16> @llvm.aarch64.neon.sqdmulh.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.aarch64.neon.sqdmulh.v8i16(<8 x i16>, <8 x i16>)
declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 78881c8..ede5a7c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -44,6 +44,35 @@ define <2 x i64> @sabdl2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp4
}
+define void @commutable_sabdl(ptr %A, ptr %B, ptr %C) nounwind {
+; CHECK-SD-LABEL: commutable_sabdl:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ldr d1, [x1]
+; CHECK-SD-NEXT: sabdl.8h v0, v1, v0
+; CHECK-SD-NEXT: str q0, [x2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commutable_sabdl:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr d0, [x0]
+; CHECK-GI-NEXT: ldr d1, [x1]
+; CHECK-GI-NEXT: sabdl.8h v0, v0, v1
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: ret
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ store <8 x i16> %tmp4, ptr %C
+ %tmp5 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1)
+ %tmp6 = zext <8 x i8> %tmp5 to <8 x i16>
+ %tmp7 = getelementptr i8, ptr %C, i64 16
+ store <8 x i16> %tmp6, ptr %C
+ ret void
+}
+
define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
; CHECK-SD-LABEL: sabdl2_8h:
; CHECK-SD: // %bb.0:
@@ -155,6 +184,35 @@ define <2 x i64> @uabdl2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp4
}
+define void @commutable_uabdl(ptr %A, ptr %B, ptr %C) nounwind {
+; CHECK-SD-LABEL: commutable_uabdl:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ldr d0, [x0]
+; CHECK-SD-NEXT: ldr d1, [x1]
+; CHECK-SD-NEXT: uabdl.8h v0, v1, v0
+; CHECK-SD-NEXT: str q0, [x2]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: commutable_uabdl:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: ldr d0, [x0]
+; CHECK-GI-NEXT: ldr d1, [x1]
+; CHECK-GI-NEXT: uabdl.8h v0, v0, v1
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: str q0, [x2]
+; CHECK-GI-NEXT: ret
+ %tmp1 = load <8 x i8>, ptr %A
+ %tmp2 = load <8 x i8>, ptr %B
+ %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ %tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+ store <8 x i16> %tmp4, ptr %C
+ %tmp5 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp1)
+ %tmp6 = zext <8 x i8> %tmp5 to <8 x i16>
+ %tmp7 = getelementptr i8, ptr %C, i64 16
+ store <8 x i16> %tmp6, ptr %C
+ ret void
+}
+
define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
; CHECK-SD-LABEL: uabdl2_8h:
; CHECK-SD: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 07400bb..d12f7ce 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -3,6 +3,7 @@
; RUN: llc -mtriple=aarch64-none-elf -mattr=+aes -global-isel -global-isel-abort=2 2>&1 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; CHECK-GI: warning: Instruction selection used fallback path for pmull8h
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for commutable_pmull8h
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_1s
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_2s
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_4s
@@ -78,6 +79,20 @@ define <2 x i64> @smull2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp3
}
+define void @commutable_smull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_smull:
+; CHECK: // %bb.0:
+; CHECK-NEXT: smull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %A, <2 x i32> %B)
+ %2 = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %B, <2 x i32> %A)
+ store <2 x i64> %1, ptr %C
+ %3 = getelementptr i8, ptr %C, i64 16
+ store <2 x i64> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.smull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -121,6 +136,20 @@ define <2 x i64> @umull2d(ptr %A, ptr %B) nounwind {
ret <2 x i64> %tmp3
}
+define void @commutable_umull(<2 x i32> %A, <2 x i32> %B, ptr %C) {
+; CHECK-LABEL: commutable_umull:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umull v0.2d, v0.2s, v1.2s
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %A, <2 x i32> %B)
+ %2 = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %B, <2 x i32> %A)
+ store <2 x i64> %1, ptr %C
+ %3 = getelementptr i8, ptr %C, i64 16
+ store <2 x i64> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.umull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
@@ -212,6 +241,20 @@ define <8 x i16> @pmull8h(ptr %A, ptr %B) nounwind {
ret <8 x i16> %tmp3
}
+define void @commutable_pmull8h(<8 x i8> %A, <8 x i8> %B, ptr %C) {
+; CHECK-LABEL: commutable_pmull8h:
+; CHECK: // %bb.0:
+; CHECK-NEXT: pmull v0.8h, v0.8b, v1.8b
+; CHECK-NEXT: stp q0, q0, [x0]
+; CHECK-NEXT: ret
+ %1 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %A, <8 x i8> %B)
+ %2 = call <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8> %B, <8 x i8> %A)
+ store <8 x i16> %1, ptr %C
+  %3 = getelementptr i8, ptr %C, i64 16
+ store <8 x i16> %2, ptr %3
+ ret void
+}
+
declare <8 x i16> @llvm.aarch64.neon.pmull.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
define <4 x i16> @sqdmulh_4h(ptr %A, ptr %B) nounwind {
@@ -487,10 +530,10 @@ define void @smlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
; CHECK-GI-LABEL: smlal2d_chain_with_constant:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mvn v3.8b, v2.8b
-; CHECK-GI-NEXT: adrp x8, .LCPI27_0
+; CHECK-GI-NEXT: adrp x8, .LCPI30_0
; CHECK-GI-NEXT: smull v1.2d, v1.2s, v3.2s
; CHECK-GI-NEXT: smlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI27_0]
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI30_0]
; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
; CHECK-GI-NEXT: str q0, [x0]
; CHECK-GI-NEXT: ret
@@ -566,8 +609,8 @@ define void @smlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
;
; CHECK-GI-LABEL: smlsl2d_chain_with_constant:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI31_0
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI31_0]
+; CHECK-GI-NEXT: adrp x8, .LCPI34_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI34_0]
; CHECK-GI-NEXT: smlsl v3.2d, v0.2s, v2.2s
; CHECK-GI-NEXT: mvn v0.8b, v2.8b
; CHECK-GI-NEXT: smlsl v3.2d, v1.2s, v0.2s
@@ -829,10 +872,10 @@ define void @umlal2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
; CHECK-GI-LABEL: umlal2d_chain_with_constant:
; CHECK-GI: // %bb.0:
; CHECK-GI-NEXT: mvn v3.8b, v2.8b
-; CHECK-GI-NEXT: adrp x8, .LCPI43_0
+; CHECK-GI-NEXT: adrp x8, .LCPI46_0
; CHECK-GI-NEXT: umull v1.2d, v1.2s, v3.2s
; CHECK-GI-NEXT: umlal v1.2d, v0.2s, v2.2s
-; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI43_0]
+; CHECK-GI-NEXT: ldr q0, [x8, :lo12:.LCPI46_0]
; CHECK-GI-NEXT: add v0.2d, v1.2d, v0.2d
; CHECK-GI-NEXT: str q0, [x0]
; CHECK-GI-NEXT: ret
@@ -908,8 +951,8 @@ define void @umlsl2d_chain_with_constant(ptr %dst, <2 x i32> %v1, <2 x i32> %v2,
;
; CHECK-GI-LABEL: umlsl2d_chain_with_constant:
; CHECK-GI: // %bb.0:
-; CHECK-GI-NEXT: adrp x8, .LCPI47_0
-; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI47_0]
+; CHECK-GI-NEXT: adrp x8, .LCPI50_0
+; CHECK-GI-NEXT: ldr q3, [x8, :lo12:.LCPI50_0]
; CHECK-GI-NEXT: umlsl v3.2d, v0.2s, v2.2s
; CHECK-GI-NEXT: mvn v0.8b, v2.8b
; CHECK-GI-NEXT: umlsl v3.2d, v1.2s, v0.2s
@@ -3222,6 +3265,20 @@ define <16 x i8> @test_pmull_high_64(<2 x i64> %l, <2 x i64> %r) nounwind {
ret <16 x i8> %val
}
+define <16 x i8> @test_commutable_pmull_64(i64 %l, i64 %r) nounwind {
+; CHECK-LABEL: test_commutable_pmull_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fmov d0, x1
+; CHECK-NEXT: fmov d1, x0
+; CHECK-NEXT: pmull v0.1q, v1.1d, v0.1d
+; CHECK-NEXT: add v0.16b, v0.16b, v0.16b
+; CHECK-NEXT: ret
+ %1 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %l, i64 %r)
+ %2 = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %r, i64 %l)
+ %3 = add <16 x i8> %1, %2
+ ret <16 x i8> %3
+}
+
declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
define <1 x i64> @test_mul_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
index 23ac67c..805d244 100644
--- a/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
+++ b/llvm/test/CodeGen/AArch64/bsp_implicit_ops.mir
@@ -96,3 +96,23 @@ body: |
$q25 = ORRv16i8 $q3, killed $q3
RET_ReallyLR implicit $q22
...
+---
+name: DoubleOp
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $q2
+
+ ; CHECK-LABEL: name: DoubleOp
+ ; CHECK: liveins: $q2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $q0 = MOVIv8i16 1, 0
+ ; CHECK-NEXT: renamable $q1 = ORRv16i8 renamable $q2, renamable $q2
+ ; CHECK-NEXT: renamable $q1 = BSLv16i8 killed renamable $q1, renamable $q2, renamable $q0
+ ; CHECK-NEXT: renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0
+ ; CHECK-NEXT: RET undef $lr, implicit $q0
+ renamable $q0 = MOVIv8i16 1, 0
+ renamable $q1 = BSPv16i8 killed renamable $q2, renamable $q2, renamable $q0
+ renamable $q0 = SQADDv8i16 killed renamable $q1, killed renamable $q0
+ RET_ReallyLR implicit $q0
+...
diff --git a/llvm/test/CodeGen/AArch64/lifetime-poison.ll b/llvm/test/CodeGen/AArch64/lifetime-poison.ll
index e04530d..dfb76d1 100644
--- a/llvm/test/CodeGen/AArch64/lifetime-poison.ll
+++ b/llvm/test/CodeGen/AArch64/lifetime-poison.ll
@@ -8,7 +8,7 @@ define void @test() {
; CHECK-LABEL: test:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
- call void @llvm.lifetime.start.p0(i64 4, ptr poison)
- call void @llvm.lifetime.end.p0(i64 4, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
index 0711f69..df83762 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-initializer-merge.ll
@@ -5,8 +5,8 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-android"
declare void @use(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
define void @OneVarNoInit() sanitize_memtag {
@@ -16,18 +16,18 @@ define void @OneVarNoInit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[TX]], i64 16)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -39,19 +39,19 @@ define void @OneVarInitConst() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -64,21 +64,21 @@ define void @ArrayInitConst() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 42, i64 0)
; CHECK-NEXT: [[TX8_16:%.*]] = getelementptr i8, ptr [[TX]], i32 16
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -90,7 +90,7 @@ define void @ArrayInitConst2() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 2
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 184683593770, i64 -1)
@@ -98,19 +98,19 @@ define void @ArrayInitConst2() sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_16]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 42, ptr %x, align 4
%0 = getelementptr i32, ptr %x, i32 1
store i32 43, ptr %0, align 4
%1 = getelementptr i32, ptr %x, i32 2
store i64 -1, ptr %1, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -122,23 +122,23 @@ define void @ArrayInitConstSplit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 16, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 -4294967296, i64 4294967295)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TX]], i32 16
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TMP1]], i64 48)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 64)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 16, align 4
- call void @llvm.lifetime.start.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 1
store i64 -1, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 64, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -150,7 +150,7 @@ define void @ArrayInitConstWithHoles() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 32, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 5
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 14
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX]], i64 16)
@@ -164,18 +164,18 @@ define void @ArrayInitConstWithHoles() sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.settag.zero(ptr [[TX8_64]], i64 64)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 128)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 32, align 4
- call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 5
store i32 42, ptr %0, align 4
%1 = getelementptr i32, ptr %x, i32 14
store i32 43, ptr %1, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -187,20 +187,20 @@ define void @InitNonConst(i32 %v) sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
; CHECK-NEXT: [[X_TAG:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[X_TAG]], i64 [[TMP0]], i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[X_TAG]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 %v, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -212,7 +212,7 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[V]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[W]] to i64
@@ -221,17 +221,17 @@ define void @InitNonConst2(i32 %v, i32 %w) sanitize_memtag {
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[VW]], i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 %v, ptr %x, align 4
%0 = getelementptr i32, ptr %x, i32 1
store i32 %w, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -243,19 +243,19 @@ define void @InitVector() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), i64 0)
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store <2 x i32> <i32 1, i32 2>, ptr %x, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -302,23 +302,23 @@ define void @InitVectorSplit() sanitize_memtag {
; CHECK-NEXT: [[BASETAG:%.*]] = call ptr @llvm.aarch64.irg.sp(i64 0)
; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 4, align 16
; CHECK-NEXT: [[TX:%.*]] = call ptr @llvm.aarch64.tagp.p0(ptr [[X]], ptr [[BASETAG]], i64 0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[TX]], i32 1
; CHECK-NEXT: [[TMP1:%.*]] = shl i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32
; CHECK-NEXT: [[LSHR:%.*]] = lshr i64 bitcast (<2 x i32> <i32 1, i32 2> to i64), 32
; CHECK-NEXT: call void @llvm.aarch64.stgp(ptr [[TX]], i64 [[TMP1]], i64 [[LSHR]])
; CHECK-NEXT: call void @use(ptr nonnull [[TX]])
; CHECK-NEXT: call void @llvm.aarch64.settag(ptr [[X]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: ret void
;
entry:
%x = alloca i32, i32 4, align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
%0 = getelementptr i32, ptr %x, i32 1
store <2 x i32> <i32 1, i32 2>, ptr %0, align 4
call void @use(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
index 45f6bfe..0fa5103 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-merge-past-memcpy.mir
@@ -18,15 +18,15 @@
%C.tag = call ptr @llvm.aarch64.tagp.p0(ptr %C, ptr %basetag, i64 1)
call void @llvm.aarch64.settag(ptr %C.tag, i64 32)
call void @F56(ptr %C.tag)
- call void @llvm.lifetime.start.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.aarch64.settag(ptr %A.tag, i64 32)
call void @F56(ptr %A.tag)
call void @llvm.aarch64.settag(ptr %A, i64 32)
- call void @llvm.lifetime.end.p0(i64 32, ptr %A)
- call void @llvm.lifetime.start.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @glob, i64 32, i1 false)
call void @F78(ptr %A)
- call void @llvm.lifetime.end.p0(i64 32, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.aarch64.settag(ptr %C, i64 32)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index aa9cccc..91adf82 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -17,17 +17,17 @@ S0:
S1:
; CHECK-LABEL: S1:
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %v) #1
+ call void @llvm.lifetime.start.p0(ptr nonnull %v) #1
; CHECK: call void @llvm.aarch64.settag(ptr %v.tag, i64 48)
- call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %w) #1
+ call void @llvm.lifetime.start.p0(ptr nonnull %w) #1
; CHECK: call void @llvm.aarch64.settag(ptr %w.tag, i64 48)
%t1 = call i32 @g1(ptr nonnull %v, ptr nonnull %w) #1
; CHECK: call i32 @g1
; CHECK-NOT: settag{{.*}}%v
; CHECK: call void @llvm.aarch64.settag(ptr %w, i64 48)
; CHECK-NOT: settag{{.*}}%v
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w) #1
-; CHECK: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %w)
+ call void @llvm.lifetime.end.p0(ptr nonnull %w) #1
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %w)
%b1 = icmp eq i32 %t1, 0
br i1 %b1, label %S2, label %S3
; CHECK-NOT: settag
@@ -40,7 +40,7 @@ S2:
S3:
; CHECK-LABEL: S3:
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %v) #1
+ call void @llvm.lifetime.end.p0(ptr nonnull %v) #1
tail call void @z1() #1
br label %exit2
; CHECK-NOT: settag
@@ -73,9 +73,9 @@ declare void @z1() #0
declare void @z2() #0
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 3a82f84..30a7864 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -370,9 +370,6 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
; GCN: ; %bb.0:
; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp
; GCN-NEXT: ; return to shader part epilog
-
-
-
%exp = call bfloat @llvm.exp2.bf16(bfloat %src)
%max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
%clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0)
@@ -384,9 +381,6 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa
; GCN: ; %bb.0:
; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp
; GCN-NEXT: ; return to shader part epilog
-
-
-
%mul = fmul <2 x bfloat> %src0, %src1
%max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>)
%clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>)
@@ -400,9 +394,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl
; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
-
-
-
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -415,9 +406,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl
; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
-
-
-
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -432,9 +420,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl
; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
-
-
-
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -447,9 +432,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl
; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0]
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
-
-
-
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5>
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -464,9 +446,6 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfl
; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
-
-
-
%mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
%add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0>
store <2 x bfloat> %add, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 8f8ea13..505ddc8 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -24671,7 +24671,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
ret <32 x bfloat> %op
}
-
declare bfloat @llvm.maxnum.bf16(bfloat, bfloat)
declare <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat>, <2 x bfloat>)
declare <3 x bfloat> @llvm.maxnum.v3bf16(<3 x bfloat>, <3 x bfloat>)
@@ -29673,7 +29672,6 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
ret { bfloat, i16 } %op
}
-
declare bfloat @llvm.log.bf16(bfloat)
declare bfloat @llvm.log2.bf16(bfloat)
declare bfloat @llvm.log10.bf16(bfloat)
diff --git a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
index f58cb84..839d0ba 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-variadic-call.ll
@@ -38,11 +38,11 @@ define hidden void @copy(ptr noundef %va) {
; CHECK-NEXT: %va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr
; CHECK-NEXT: %cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr
; CHECK-NEXT: store ptr %va, ptr addrspace(5) %va.addr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp.ascast, ptr %va.addr.ascast, i32 8, i1 false)
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %cp, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp)
; CHECK-NEXT: ret void
;
entry:
@@ -51,43 +51,43 @@ entry:
%va.addr.ascast = addrspacecast ptr addrspace(5) %va.addr to ptr
%cp.ascast = addrspacecast ptr addrspace(5) %cp to ptr
store ptr %va, ptr addrspace(5) %va.addr, align 8
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %cp)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %cp)
call void @llvm.va_copy.p0(ptr %cp.ascast, ptr nonnull %va.addr.ascast)
%0 = load ptr, ptr addrspace(5) %cp, align 8
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %cp)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %cp)
ret void
}
-declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) nocapture)
+declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare hidden void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) nocapture)
+declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture)
define hidden void @start_once(...) {
; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s = alloca ptr, align 8, addrspace(5)
; CHECK-NEXT: %s.ascast = addrspacecast ptr addrspace(5) %s to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s)
; CHECK-NEXT: store ptr %varargs, ptr %s.ascast, align 8
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s)
; CHECK-NEXT: ret void
;
entry:
%s = alloca ptr, align 8, addrspace(5)
%s.ascast = addrspacecast ptr addrspace(5) %s to ptr
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s)
call void @llvm.va_start.p0(ptr %s.ascast)
%0 = load ptr, ptr addrspace(5) %s, align 8
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s.ascast)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s)
ret void
}
@@ -102,16 +102,16 @@ define hidden void @start_twice(...) {
; CHECK-NEXT: %s1 = alloca ptr, align 8, addrspace(5)
; CHECK-NEXT: %s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr
; CHECK-NEXT: %s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1)
; CHECK-NEXT: store ptr %varargs, ptr %s0.ascast, align 8
; CHECK-NEXT: %0 = load ptr, ptr addrspace(5) %s0, align 8
; CHECK-NEXT: call void @valist(ptr noundef %0)
; CHECK-NEXT: store ptr %varargs, ptr %s1.ascast, align 8
; CHECK-NEXT: %1 = load ptr, ptr addrspace(5) %s1, align 8
; CHECK-NEXT: call void @valist(ptr noundef %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0)
; CHECK-NEXT: ret void
;
entry:
@@ -119,8 +119,8 @@ entry:
%s1 = alloca ptr, align 8, addrspace(5)
%s0.ascast = addrspacecast ptr addrspace(5) %s0 to ptr
%s1.ascast = addrspacecast ptr addrspace(5) %s1 to ptr
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s0)
- call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %s1)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s0)
+ call void @llvm.lifetime.start.p5(ptr addrspace(5) %s1)
call void @llvm.va_start.p0(ptr %s0.ascast)
%0 = load ptr, ptr addrspace(5) %s0, align 8
call void @valist(ptr noundef %0)
@@ -129,8 +129,8 @@ entry:
%1 = load ptr, ptr addrspace(5) %s1, align 8
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1.ascast)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s1)
- call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %s0)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s1)
+ call void @llvm.lifetime.end.p5(ptr addrspace(5) %s0)
ret void
}
@@ -138,12 +138,12 @@ define hidden void @single_i32(i32 noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -157,12 +157,12 @@ define hidden void @single_double(double noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 8, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 8, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -174,12 +174,12 @@ define hidden void @single_v4f32(<4 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 16, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 16, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -191,12 +191,12 @@ define hidden void @single_v8f32(<8 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -208,12 +208,12 @@ define hidden void @single_v16f32(<16 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 64, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 64, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -225,12 +225,12 @@ define hidden void @single_v32f32(<32 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 128, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
; CHECK-NEXT: %1 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 128, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -242,14 +242,14 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -261,14 +261,14 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 12, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -286,14 +286,14 @@ define hidden void @i32_libcS(i32 noundef %x, i8 %y.coerce0, i16 %y.coerce1, i32
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %y.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %y.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %y.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -317,14 +317,14 @@ define hidden void @libcS_i32(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i64
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -342,14 +342,14 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <4 x float> %y, ptr addrspace(5) %1, align 16
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -361,14 +361,14 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr addrspace(5) %0, align 16
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 20, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -380,14 +380,14 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <8 x float> %y, ptr addrspace(5) %1, align 32
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -399,14 +399,14 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr addrspace(5) %0, align 32
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 36, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -418,14 +418,14 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <16 x float> %y, ptr addrspace(5) %1, align 64
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -437,14 +437,14 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr addrspace(5) %0, align 64
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 68, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -456,14 +456,14 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store <32 x float> %y, ptr addrspace(5) %1, align 128
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -475,14 +475,14 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr addrspace(5) %0, align 128
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void @vararg(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 132, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -495,12 +495,12 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 4, addrspace(5)
; CHECK-NEXT: %0 = load volatile ptr, ptr addrspacecast (ptr addrspace(1) @vararg_ptr to ptr), align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr addrspace(5) %1, align 4
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -520,12 +520,12 @@ define hidden void @fptr_libcS(i8 %x.coerce0, i16 %x.coerce1, i32 %x.coerce2, i6
; CHECK-NEXT: %.fca.3.insert = insertvalue %struct.libcS %.fca.2.insert, i64 %x.coerce3, 3
; CHECK-NEXT: %.fca.4.insert = insertvalue %struct.libcS %.fca.3.insert, float %x.coerce4, 4
; CHECK-NEXT: %.fca.5.insert = insertvalue %struct.libcS %.fca.4.insert, double %x.coerce5, 5
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr addrspace(5) %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store %struct.libcS %.fca.5.insert, ptr addrspace(5) %1, align 8
; CHECK-NEXT: %2 = addrspacecast ptr addrspace(5) %vararg_buffer to ptr
; CHECK-NEXT: call void %0(ptr %2)
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 32, ptr addrspace(5) %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
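; The recurring change in this file: llvm.lifetime.start/.end drop their
; explicit size operand, so the extent is taken from the underlying alloca.
; Sketching the general shape (with %buf standing in for any of the vararg
; buffers above):
;   call void @llvm.lifetime.start.p5(i64 36, ptr addrspace(5) %buf)  ; old form
;   call void @llvm.lifetime.start.p5(ptr addrspace(5) %buf)          ; new form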
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index 462090c..0a2e7af 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -1,12 +1,46 @@
-; RUN: llc -mtriple=amdgcn -mcpu=verde < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -amdgpu-enable-delay-alu=0 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11-12,GFX12 %s
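; The added -amdgpu-enable-delay-alu=0 suppresses s_delay_alu hint
; instructions on gfx11/gfx12 so the autogenerated checks below stay compact;
; it should not affect which compares or exec-mask updates are selected.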
-; GCN-LABEL: {{^}}gs_const:
-; GCN-NOT: v_cmpx
-; GCN: s_mov_b64 exec, 0
define amdgpu_gs void @gs_const() {
+; SI-LABEL: gs_const:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: gs_const:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], exec
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: gs_const:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], exec
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%tmp = icmp ule i32 0, 3
%tmp1 = select i1 %tmp, float 1.000000e+00, float -1.000000e+00
%c1 = fcmp oge float %tmp1, 0.0
@@ -19,12 +53,81 @@ define amdgpu_gs void @gs_const() {
ret void
}
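; Reading of the checks above: a kill whose condition folds to false never
; needs v_cmpx and reduces to clearing exec, and message 3 decodes as
; MSG_GS_DONE before gfx11 but as MSG_DEALLOC_VGPRS on gfx11/gfx12.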
-; GCN-LABEL: {{^}}vcc_implicit_def:
-; GCN: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
-; GCN: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}}
-; GCN: s_and{{n2|_not1}}_b64 exec, exec, vcc
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]]
define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
+; SI-LABEL: vcc_implicit_def:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; SI-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; SI-NEXT: exp mrt1 v0, v0, v0, v0 done vm
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB1_2:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: vcc_implicit_def:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: exp mrt1 v0, v0, v0, v0 done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB1_2:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: vcc_implicit_def:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: exp mrt1 v0, v0, v0, v0 done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB1_2:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: vcc_implicit_def:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_le_f32_e64 s[0:1], 0, v1
+; GFX12-NEXT: s_mov_b64 s[2:3], exec
+; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, vcc
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: export mrt1 v0, v0, v0, v0 done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB1_2:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%tmp0 = fcmp olt float %arg13, 0.000000e+00
%c1 = fcmp oge float %arg14, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
@@ -34,31 +137,102 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) {
ret void
}
-; GCN-LABEL: {{^}}true:
-; GCN-NEXT: %bb.
-; GCN-NEXT: s_endpgm
define amdgpu_gs void @true() {
+; GCN-LABEL: true:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 true)
ret void
}
-; GCN-LABEL: {{^}}false:
-; GCN-NOT: v_cmpx
-; GCN: s_mov_b64 exec, 0
define amdgpu_gs void @false() {
+; SI-LABEL: false:
+; SI: ; %bb.0:
+; SI-NEXT: s_andn2_b64 exec, exec, exec
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: false:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_andn2_b64 exec, exec, exec
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: false:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: s_and_not1_b64 exec, exec, exec
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 false)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}and:
-; GCN: v_cmp_lt_i32
-; GCN: v_cmp_lt_i32
-; GCN: s_or_b64 s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[0:1], exec, s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1]
-; GCN: s_and_b64 exec, exec, s[2:3]
define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) {
+; SI-LABEL: and:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; SI-NEXT: s_and_b64 exec, exec, s[2:3]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: and:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: and:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX11-12-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-12-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = icmp slt i32 %a, %b
%c2 = icmp slt i32 %c, %d
%x = or i1 %c1, %c2
@@ -67,13 +241,52 @@ define amdgpu_gs void @and(i32 %a, i32 %b, i32 %c, i32 %d) {
ret void
}
-; GCN-LABEL: {{^}}andn2:
-; GCN: v_cmp_lt_i32
-; GCN: v_cmp_lt_i32
-; GCN: s_xor_b64 s[0:1]
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], s[2:3], s[0:1]
-; GCN: s_and_b64 exec, exec, s[2:3]
define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) {
+; SI-LABEL: andn2:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; SI-NEXT: s_and_b64 exec, exec, s[2:3]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: andn2:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX10-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
+; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: andn2:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_lt_i32_e32 vcc, v0, v1
+; GFX11-12-NEXT: v_cmp_lt_i32_e64 s[0:1], v2, v3
+; GFX11-12-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-12-NEXT: s_xor_b64 s[0:1], vcc, s[0:1]
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[0:1]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[2:3]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = icmp slt i32 %a, %b
%c2 = icmp slt i32 %c, %d
%x = xor i1 %c1, %c2
@@ -83,135 +296,854 @@ define amdgpu_gs void @andn2(i32 %a, i32 %b, i32 %c, i32 %d) {
ret void
}
-; GCN-LABEL: {{^}}oeq:
-; GCN: v_cmp_neq_f32
+; Should use v_cmp_neq_f32 (GFX12 instead emits v_cmp_eq_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @oeq(float %a) {
+; SI-LABEL: oeq:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: oeq:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: oeq:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: oeq:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp oeq float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ogt:
-; GCN: v_cmp_nlt_f32
+; Should use v_cmp_nlt_f32 (GFX12 instead emits v_cmp_lt_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ogt(float %a) {
+; SI-LABEL: ogt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ogt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ogt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ogt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ogt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}oge:
-; GCN: v_cmp_nle_f32
+; Should use v_cmp_nle_f32 (GFX12 instead emits v_cmp_le_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @oge(float %a) {
+; SI-LABEL: oge:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: oge:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: oge:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: oge:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp oge float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}olt:
-; GCN: v_cmp_ngt_f32
+; Should use v_cmp_ngt_f32 (GFX12 instead emits v_cmp_gt_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @olt(float %a) {
+; SI-LABEL: olt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: olt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: olt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ngt_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: olt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_gt_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp olt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ole:
-; GCN: v_cmp_nge_f32
+; Should use v_cmp_nge_f32 (GFX12 instead emits v_cmp_ge_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ole(float %a) {
+; SI-LABEL: ole:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ole:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ole:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ole:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ole float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}one:
-; GCN: v_cmp_nlg_f32
+; Should use v_cmp_nlg_f32 (GFX12 instead emits v_cmp_lg_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @one(float %a) {
+; SI-LABEL: one:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: one:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: one:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: one:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp one float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ord:
-; GCN: v_cmp_o_f32
+; Should use v_cmp_o_f32
define amdgpu_gs void @ord(float %a) {
+; SI-LABEL: ord:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ord:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: ord:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = fcmp ord float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}uno:
-; GCN: v_cmp_u_f32
+; Should use v_cmp_u_f32
define amdgpu_gs void @uno(float %a) {
+; SI-LABEL: uno:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: uno:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, vcc
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-12-LABEL: uno:
+; GFX11-12: ; %bb.0:
+; GFX11-12-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX11-12-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX11-12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-12-NEXT: s_mov_b32 m0, 0
+; GFX11-12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-12-NEXT: s_endpgm
+; GFX11-12-NEXT: ; %bb.1:
+; GFX11-12-NEXT: s_mov_b64 exec, 0
+; GFX11-12-NEXT: s_endpgm
%c1 = fcmp uno float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ueq:
-; GCN: v_cmp_lg_f32
+; Should use v_cmp_lg_f32 (GFX12 instead emits v_cmp_nlg_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ueq(float %a) {
+; SI-LABEL: ueq:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ueq:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ueq:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lg_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ueq:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nlg_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ueq float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ugt:
-; GCN: v_cmp_ge_f32
+; Should use v_cmp_ge_f32 (GFX12 instead emits v_cmp_nge_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ugt(float %a) {
+; SI-LABEL: ugt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ugt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ugt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ge_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ugt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nge_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ugt float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}uge:
-; GCN: v_cmp_gt_f32_e32 vcc, -1.0
+; Should use v_cmp_gt_f32_e32 vcc, -1.0 (GFX12 instead emits v_cmp_ngt_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @uge(float %a) {
+; SI-LABEL: uge:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: uge:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: uge:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: uge:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, -1.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp uge float %a, -1.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ult:
-; GCN: v_cmp_le_f32_e32 vcc, -2.0
+; Should use v_cmp_le_f32_e32 vcc, -2.0 (GFX12 instead emits v_cmp_nle_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ult(float %a) {
+; SI-LABEL: ult:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ult:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ult:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_le_f32_e32 vcc, -2.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ult:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nle_f32_e32 vcc, -2.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ult float %a, -2.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}ule:
-; GCN: v_cmp_lt_f32_e32 vcc, 2.0
+; Should use v_cmp_lt_f32_e32 vcc, 2.0 (GFX12 instead emits v_cmp_nlt_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @ule(float %a) {
+; SI-LABEL: ule:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: ule:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: ule:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 2.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: ule:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_nlt_f32_e32 vcc, 2.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp ule float %a, 2.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}une:
-; GCN: v_cmp_eq_f32_e32 vcc, 0
+; Should use v_cmp_eq_f32_e32 vcc, 0 (GFX12 instead emits v_cmp_neq_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @une(float %a) {
+; SI-LABEL: une:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: une:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: une:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: une:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp une float %a, 0.0
call void @llvm.amdgcn.kill(i1 %c1)
call void @llvm.amdgcn.s.sendmsg(i32 3, i32 0)
ret void
}
-; GCN-LABEL: {{^}}neg_olt:
-; GCN: v_cmp_gt_f32_e32 vcc, 1.0
+; Should use v_cmp_gt_f32_e32 vcc, 1.0 (GFX12 instead emits v_cmp_ngt_f32 and negates it via s_and_not1_b64).
define amdgpu_gs void @neg_olt(float %a) {
+; SI-LABEL: neg_olt:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_nop 0
+; SI-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; SI-NEXT: s_endpgm
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: neg_olt:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_mov_b32 m0, 0
+; GFX10-NEXT: s_sendmsg sendmsg(MSG_GS_DONE, GS_OP_NOP)
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: neg_olt:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_mov_b32 m0, 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: neg_olt:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_ngt_f32_e32 vcc, 1.0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_mov_b32 m0, 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: s_endpgm
%c1 = fcmp olt float %a, 1.0
%c2 = xor i1 %c1, 1
call void @llvm.amdgcn.kill(i1 %c2)
@@ -219,13 +1151,61 @@ define amdgpu_gs void @neg_olt(float %a) {
ret void
}
-; GCN-LABEL: {{^}}fcmp_x2:
; FIXME: LLVM should be able to combine these fcmp opcodes.
-; SI: v_cmp_lt_f32_e32 vcc, s{{[0-9]+}}, v0
-; GFX10: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
-; GCN: v_cndmask_b32
-; GCN: v_cmp_nle_f32
define amdgpu_ps void @fcmp_x2(float %a) #0 {
+; SI-LABEL: fcmp_x2:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b32 s0, 0x3e800000
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB21_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB21_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: fcmp_x2:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB21_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: fcmp_x2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX11-NEXT: v_cmp_nle_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB21_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: fcmp_x2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_lt_f32_e32 vcc, 0x3e800000, v0
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
+; GFX12-NEXT: v_cmp_le_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, vcc
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[2:3]
+; GFX12-NEXT: s_cbranch_scc0 .LBB21_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB21_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%ogt = fcmp nsz ogt float %a, 2.500000e-01
%k = select i1 %ogt, float -1.000000e+00, float 0.000000e+00
%c = fcmp nsz oge float %k, 0.000000e+00
@@ -234,14 +1214,78 @@ define amdgpu_ps void @fcmp_x2(float %a) #0 {
}
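; Sketch of the missing combine flagged by the FIXME above: %k is -1.0 exactly
; when %ogt holds, so under nsz the kill condition is equivalent to a single
; compare,
;   %c = fcmp nsz ule float %a, 2.500000e-01
; which would avoid the v_cndmask plus second v_cmp (editorial derivation,
; not something the test checks today).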
; Note: an almost identical test for this exists in llvm.amdgcn.wqm.vote.ll
-; GCN-LABEL: {{^}}wqm:
-; GCN: v_cmp_neq_f32_e32 vcc, 0
-; GCN-DAG: s_wqm_b64 s[2:3], vcc
-; GCN-DAG: s_mov_b64 s[0:1], exec
-; GCN: s_and{{n2|_not1}}_b64 s[2:3], exec, s[2:3]
-; GCN: s_and{{n2|_not1}}_b64 s[0:1], s[0:1], s[2:3]
-; GCN: s_and_b64 exec, exec, s[0:1]
define amdgpu_ps float @wqm(float %a) {
+; SI-LABEL: wqm:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; SI-NEXT: s_wqm_b64 s[2:3], vcc
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_andn2_b64 s[2:3], exec, s[2:3]
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_cbranch_scc0 .LBB22_2
+; SI-NEXT: ; %bb.1:
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v0, 0
+; SI-NEXT: s_branch .LBB22_3
+; SI-NEXT: .LBB22_2:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB22_3:
+;
+; GFX10-LABEL: wqm:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[2:3]
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX10-NEXT: ; %bb.1:
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_branch .LBB22_3
+; GFX10-NEXT: .LBB22_2:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB22_3:
+;
+; GFX11-LABEL: wqm:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX11-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3]
+; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX11-NEXT: ; %bb.1:
+; GFX11-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_branch .LBB22_3
+; GFX11-NEXT: .LBB22_2:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB22_3:
+;
+; GFX12-LABEL: wqm:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_wqm_b64 s[2:3], vcc
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[2:3]
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_cbranch_scc0 .LBB22_2
+; GFX12-NEXT: ; %bb.1:
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_branch .LBB22_3
+; GFX12-NEXT: .LBB22_2:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB22_3:
%c1 = fcmp une float %a, 0.0
%c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
call void @llvm.amdgcn.kill(i1 %c2)
@@ -249,28 +1293,212 @@ define amdgpu_ps float @wqm(float %a) {
}
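; s_wqm_b64 widens the vote to whole 2x2 quads, so a lane survives the kill
; if any lane in its quad passed the compare; that is the wqm.vote semantics
; the sequence above implements.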
; This checks that we use the 64-bit encoding when the operand is an SGPR.
-; GCN-LABEL: {{^}}test_sgpr:
-; GCN: v_cmp_nle_f32_e64
define amdgpu_ps void @test_sgpr(float inreg %a) #0 {
+; SI-LABEL: test_sgpr:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; SI-NEXT: s_andn2_b64 exec, exec, vcc
+; SI-NEXT: s_cbranch_scc0 .LBB23_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB23_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_sgpr:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; GFX10-NEXT: s_andn2_b64 exec, exec, vcc
+; GFX10-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB23_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_sgpr:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_nle_f32_e64 vcc, s0, 1.0
+; GFX11-NEXT: s_and_not1_b64 exec, exec, vcc
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB23_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_sgpr:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_cmp_le_f32 s0, 1.0
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB23_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB23_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%c = fcmp ole float %a, 1.000000e+00
call void @llvm.amdgcn.kill(i1 %c) #1
ret void
}
-; GCN-LABEL: {{^}}test_non_inline_imm_sgpr:
-; GCN-NOT: v_cmp_le_f32_e64
define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 {
+; SI-LABEL: test_non_inline_imm_sgpr:
+; SI: ; %bb.0:
+; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; SI-NEXT: v_cmp_le_f32_e32 vcc, s0, v0
+; SI-NEXT: s_andn2_b64 s[0:1], exec, vcc
+; SI-NEXT: s_andn2_b64 s[2:3], exec, s[0:1]
+; SI-NEXT: s_cbranch_scc0 .LBB24_1
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB24_1:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_non_inline_imm_sgpr:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0
+; GFX10-NEXT: s_andn2_b64 s[0:1], exec, s[0:1]
+; GFX10-NEXT: s_andn2_b64 s[2:3], exec, s[0:1]
+; GFX10-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB24_1:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_non_inline_imm_sgpr:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_cmp_ge_f32_e64 s[0:1], 0x3fc00000, s0
+; GFX11-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX11-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX11-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB24_1:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_non_inline_imm_sgpr:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_cmp_le_f32 s0, 0x3fc00000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[0:1], exec, s[0:1]
+; GFX12-NEXT: s_and_not1_b64 s[2:3], exec, s[0:1]
+; GFX12-NEXT: s_cbranch_scc0 .LBB24_1
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB24_1:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
%c = fcmp ole float %a, 1.500000e+00
call void @llvm.amdgcn.kill(i1 %c) #1
ret void
}
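; 0x3fc00000 is 1.5f, which is not an AMDGPU inline constant (unlike the 1.0
; in test_sgpr), so SI materializes it with v_mov_b32 while gfx10 and later
; encode it as a literal on the compare.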
-; GCN-LABEL: {{^}}test_scc_liveness:
-; GCN: s_cmp
-; GCN: s_and_b64 exec
-; GCN: s_cmp
-; GCN: s_cbranch_scc
define amdgpu_ps void @test_scc_liveness() #0 {
+; SI-LABEL: test_scc_liveness:
+; SI: ; %bb.0: ; %main_body
+; SI-NEXT: s_mov_b64 s[0:1], exec
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: .LBB25_1: ; %loop3
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_cmp_gt_i32 s2, 0
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
+; SI-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: ; %bb.2: ; %loop3
+; SI-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; SI-NEXT: s_and_b64 exec, exec, s[0:1]
+; SI-NEXT: s_add_i32 s3, s2, 1
+; SI-NEXT: s_cmp_lt_i32 s2, 1
+; SI-NEXT: s_mov_b32 s2, s3
+; SI-NEXT: s_cbranch_scc1 .LBB25_1
+; SI-NEXT: ; %bb.3: ; %endloop15
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB25_4:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: test_scc_liveness:
+; GFX10: ; %bb.0: ; %main_body
+; GFX10-NEXT: s_mov_b64 s[0:1], exec
+; GFX10-NEXT: s_mov_b32 s2, 0
+; GFX10-NEXT: .LBB25_1: ; %loop3
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX10-NEXT: s_andn2_b64 s[4:5], exec, s[4:5]
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX10-NEXT: ; %bb.2: ; %loop3
+; GFX10-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX10-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX10-NEXT: s_add_i32 s3, s2, 1
+; GFX10-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-NEXT: s_mov_b32 s2, s3
+; GFX10-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX10-NEXT: ; %bb.3: ; %endloop15
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB25_4:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test_scc_liveness:
+; GFX11: ; %bb.0: ; %main_body
+; GFX11-NEXT: s_mov_b64 s[0:1], exec
+; GFX11-NEXT: s_mov_b32 s2, 0
+; GFX11-NEXT: .LBB25_1: ; %loop3
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_cmp_gt_i32 s2, 0
+; GFX11-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX11-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5]
+; GFX11-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX11-NEXT: ; %bb.2: ; %loop3
+; GFX11-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX11-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX11-NEXT: s_add_i32 s3, s2, 1
+; GFX11-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-NEXT: s_mov_b32 s2, s3
+; GFX11-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX11-NEXT: ; %bb.3: ; %endloop15
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB25_4:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_scc_liveness:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: s_mov_b64 s[0:1], exec
+; GFX12-NEXT: s_mov_b32 s2, 0
+; GFX12-NEXT: .LBB25_1: ; %loop3
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_cmp_gt_i32 s2, 0
+; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX12-NEXT: s_and_not1_b64 s[4:5], exec, s[4:5]
+; GFX12-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX12-NEXT: ; %bb.2: ; %loop3
+; GFX12-NEXT: ; in Loop: Header=BB25_1 Depth=1
+; GFX12-NEXT: s_and_b64 exec, exec, s[0:1]
+; GFX12-NEXT: s_add_co_i32 s3, s2, 1
+; GFX12-NEXT: s_cmp_lt_i32 s2, 1
+; GFX12-NEXT: s_mov_b32 s2, s3
+; GFX12-NEXT: s_cbranch_scc1 .LBB25_1
+; GFX12-NEXT: ; %bb.3: ; %endloop15
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB25_4:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
main_body:
br label %loop3
@@ -287,11 +1515,139 @@ endloop15: ; preds = %loop3
; Check that this compiles.
; If kill is marked as defining VCC, this will fail with live interval issues.
-; GCN-LABEL: {{^}}kill_with_loop_exit:
-; GCN: s_mov_b64 [[LIVE:s\[[0-9]+:[0-9]+\]]], exec
-; GCN: s_and{{n2|_not1}}_b64 [[LIVE]], [[LIVE]], exec
-; GCN-NEXT: s_cbranch_scc0
define amdgpu_ps void @kill_with_loop_exit(float inreg %inp0, float inreg %inp1, <4 x i32> inreg %inp2, float inreg %inp3) {
+; SI-LABEL: kill_with_loop_exit:
+; SI: ; %bb.0: ; %.entry
+; SI-NEXT: v_mov_b32_e32 v0, 0x43000000
+; SI-NEXT: v_cmp_lt_f32_e32 vcc, s0, v0
+; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s1, v0
+; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v0, 1.0
+; SI-NEXT: s_cbranch_vccnz .LBB26_5
+; SI-NEXT: ; %bb.1: ; %.preheader1.preheader
+; SI-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; SI-NEXT: s_mov_b64 s[2:3], exec
+; SI-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; SI-NEXT: .LBB26_2: ; %bb
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; SI-NEXT: s_cbranch_vccnz .LBB26_2
+; SI-NEXT: ; %bb.3: ; %bb33
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; SI-NEXT: s_cbranch_scc0 .LBB26_6
+; SI-NEXT: ; %bb.4: ; %bb33
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: .LBB26_5: ; %bb35
+; SI-NEXT: exp mrt0 v0, v0, v0, v0 done vm
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB26_6:
+; SI-NEXT: s_mov_b64 exec, 0
+; SI-NEXT: exp null off, off, off, off done vm
+; SI-NEXT: s_endpgm
+;
+; GFX10-LABEL: kill_with_loop_exit:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0
+; GFX10-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1
+; GFX10-NEXT: v_mov_b32_e32 v0, 1.0
+; GFX10-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX10-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX10-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; GFX10-NEXT: s_mov_b64 s[2:3], exec
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX10-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; GFX10-NEXT: .LBB26_2: ; %bb
+; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX10-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX10-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX10-NEXT: ; %bb.3: ; %bb33
+; GFX10-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; GFX10-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX10-NEXT: ; %bb.4: ; %bb33
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: .LBB26_5: ; %bb35
+; GFX10-NEXT: exp mrt0 v0, v0, v0, v0 done vm
+; GFX10-NEXT: s_endpgm
+; GFX10-NEXT: .LBB26_6:
+; GFX10-NEXT: s_mov_b64 exec, 0
+; GFX10-NEXT: exp null off, off, off, off done vm
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: kill_with_loop_exit:
+; GFX11: ; %bb.0: ; %.entry
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[4:5], 0x43000000, s0
+; GFX11-NEXT: v_cmp_gt_f32_e64 s[0:1], 0x43000000, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, 1.0
+; GFX11-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX11-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX11-NEXT: v_cmp_ngt_f32_e64 s[0:1], s6, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, 0x3fc00000
+; GFX11-NEXT: s_mov_b64 s[2:3], exec
+; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX11-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
+; GFX11-NEXT: .LBB26_2: ; %bb
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: v_add_f32_e32 v0, 0x3e800000, v0
+; GFX11-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX11-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX11-NEXT: ; %bb.3: ; %bb33
+; GFX11-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec
+; GFX11-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX11-NEXT: ; %bb.4: ; %bb33
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: .LBB26_5: ; %bb35
+; GFX11-NEXT: exp mrt0 v0, v0, v0, v0 done
+; GFX11-NEXT: s_endpgm
+; GFX11-NEXT: .LBB26_6:
+; GFX11-NEXT: s_mov_b64 exec, 0
+; GFX11-NEXT: exp mrt0 off, off, off, off done
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: kill_with_loop_exit:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_cmp_lt_f32 s0, 0x43000000
+; GFX12-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX12-NEXT: s_cmp_lt_f32 s1, 0x43000000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: s_and_b64 s[0:1], s[4:5], s[0:1]
+; GFX12-NEXT: s_mov_b32 s4, 1.0
+; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX12-NEXT: s_cbranch_vccnz .LBB26_5
+; GFX12-NEXT: ; %bb.1: ; %.preheader1.preheader
+; GFX12-NEXT: s_cmp_ngt_f32 s6, 0
+; GFX12-NEXT: s_mov_b64 s[2:3], exec
+; GFX12-NEXT: s_mov_b32 s4, 0x3fc00000
+; GFX12-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX12-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
+; GFX12-NEXT: .LBB26_2: ; %bb
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_add_f32 s4, s4, 0x3e800000
+; GFX12-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX12-NEXT: s_cbranch_vccnz .LBB26_2
+; GFX12-NEXT: ; %bb.3: ; %bb33
+; GFX12-NEXT: s_and_not1_b64 s[2:3], s[2:3], exec
+; GFX12-NEXT: s_cbranch_scc0 .LBB26_6
+; GFX12-NEXT: ; %bb.4: ; %bb33
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: .LBB26_5: ; %bb35
+; GFX12-NEXT: v_mov_b32_e32 v0, s4
+; GFX12-NEXT: export mrt0 v0, v0, v0, v0 done
+; GFX12-NEXT: s_endpgm
+; GFX12-NEXT: .LBB26_6:
+; GFX12-NEXT: s_mov_b64 exec, 0
+; GFX12-NEXT: export mrt0 off, off, off, off done
+; GFX12-NEXT: s_endpgm
.entry:
%tmp24 = fcmp olt float %inp0, 1.280000e+02
%tmp25 = fcmp olt float %inp1, 1.280000e+02
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
index 91a8446..13ea8b0 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
@@ -18,10 +18,9 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: v_mov_b32_e32 v2, s3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_endpgm
;
@@ -34,14 +33,12 @@ define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "a
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
; GFX12-NEXT: v_readfirstlane_b32 s0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_readfirstlane_b32 s1, v3
; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_or_b32 s0, s0, s1
-; GFX12-NEXT: s_or_b32 s0, s2, s0
-; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2
; GFX12-NEXT: global_store_b32 v[0:1], v2, off
; GFX12-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
index 11cda2d..c96ba75 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-bf16.ll
@@ -199,7 +199,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_negabsf32(bfloat %src0, bfloat %src1,
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_f32imm1(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_f32imm1:
; GFX1250: ; %bb.0:
@@ -230,7 +229,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_f32imminv2pi(bfloat %src0, bfloat %src
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi:
; GFX1250: ; %bb.0:
@@ -247,7 +245,6 @@ define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imminv2pi(bfloat %src0, bfloat
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63(bfloat %src0, bfloat %src1) #0 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_cvtbf16imm63:
; GFX1250: ; %bb.0:
@@ -360,7 +357,6 @@ define float @no_mix_simple_fabs(float %src0, float %src1, float %src2) #0 {
ret float %result
}
-
define float @v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals(bfloat %src0, bfloat %src1, bfloat %src2) #1 {
; GFX1250-LABEL: v_mad_mix_f32_bf16lo_bf16lo_bf16lo_f32_denormals:
; GFX1250: ; %bb.0:
@@ -469,7 +465,6 @@ define float @v_mad_mix_f32_negprecvtbf16lo_bf16lo_bf16lo(i32 %src0.arg, bfloat
ret float %result
}
-
define float @v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo(i32 %src0.arg, bfloat %src1, bfloat %src2) #0 {
; GFX1250-LABEL: v_mad_mix_f32_precvtnegbf16hi_abs_bf16lo_bf16lo:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
index 4393172..03304ae 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
@@ -76,9 +76,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-
-
-
%src0.ext = fpext bfloat %src0 to float
%src1.ext = fpext bfloat %src1 to float
%result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2)
@@ -106,7 +103,6 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_pre_cvt(bfloat %src0, bf
ret bfloat %cvt.result
}
-
define <2 x bfloat> @v_mad_mix_v2f32(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32:
; GFX1250: ; %bb.0:
@@ -179,7 +175,6 @@ define <4 x bfloat> @v_mad_mix_v4f32(<4 x bfloat> %src0, <4 x bfloat> %src1, <4
ret <4 x bfloat> %cvt.result
}
-
define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32_clamp_postcvt:
; GFX1250: ; %bb.0:
@@ -194,9 +189,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo
; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1]
; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-
-
-
%src0.ext = fpext <2 x bfloat> %src0 to <2 x float>
%src1.ext = fpext <2 x bfloat> %src1 to <2 x float>
%src2.ext = fpext <2 x bfloat> %src2 to <2 x float>
@@ -207,7 +199,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo
ret <2 x bfloat> %clamp
}
-
define <3 x bfloat> @v_mad_mix_v3f32_clamp_postcvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v3f32_clamp_postcvt:
; GFX1250: ; %bb.0:
@@ -252,9 +243,6 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo
; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-
-
-
%src0.ext = fpext <4 x bfloat> %src0 to <4 x float>
%src1.ext = fpext <4 x bfloat> %src1 to <4 x float>
%src2.ext = fpext <4 x bfloat> %src2 to <4 x float>
@@ -325,7 +313,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt_hi(<2 x bfloat> %src0, <2 x b
ret <2 x bfloat> %insert
}
-
define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloat> %src1, <2 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v2f32_clamp_precvt:
; GFX1250: ; %bb.0:
@@ -353,7 +340,6 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_precvt(<2 x bfloat> %src0, <2 x bfloa
ret <2 x bfloat> %cvt.result
}
-
define <3 x bfloat> @v_mad_mix_v3f32_clamp_precvt(<3 x bfloat> %src0, <3 x bfloat> %src1, <3 x bfloat> %src2) #0 {
; GFX1250-LABEL: v_mad_mix_v3f32_clamp_precvt:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 0b43ff2..b35a74e 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -200,8 +200,199 @@ bb:
ret void
}
-declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1
-declare noundef i32 @llvm.amdgcn.workitem.id.x() #2
+; The inline asm requires the value be copied to an AGPR class, not
+; the AV_* pseudo we usually expect for register allocator live range
+; splits.
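+; With the direct AGPR use, the loads and the MFMA below operate on a[0:31]
+; directly, so no cross-class copies are needed before the inline asm.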
+define amdgpu_kernel void @test_rewrite_mfma_direct_copy_to_agpr_class(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_rewrite_mfma_direct_copy_to_agpr_class:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT: v_mov_b32_e32 v32, 2.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 4.0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112
+; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80
+; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v32, v33, a[0:31]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:31]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 2.0, float 4.0, <32 x float> %in, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+ ret void
+}
+
+; TODO: Handle rewriting this case
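+; The inline-immediate src2 currently blocks the rewrite, so the MFMA result
+; is produced in v[0:31] and copied into a[0:31] one register at a time for
+; the inline asm use.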
+define void @test_rewrite_mfma_imm_src2(float %arg0, float %arg1) #0 {
+; CHECK-LABEL: test_rewrite_mfma_imm_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v0, v1, 2.0
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v2
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a4, v4
+; CHECK-NEXT: v_accvgpr_write_b32 a5, v5
+; CHECK-NEXT: v_accvgpr_write_b32 a6, v6
+; CHECK-NEXT: v_accvgpr_write_b32 a7, v7
+; CHECK-NEXT: v_accvgpr_write_b32 a8, v8
+; CHECK-NEXT: v_accvgpr_write_b32 a9, v9
+; CHECK-NEXT: v_accvgpr_write_b32 a10, v10
+; CHECK-NEXT: v_accvgpr_write_b32 a11, v11
+; CHECK-NEXT: v_accvgpr_write_b32 a12, v12
+; CHECK-NEXT: v_accvgpr_write_b32 a13, v13
+; CHECK-NEXT: v_accvgpr_write_b32 a14, v14
+; CHECK-NEXT: v_accvgpr_write_b32 a15, v15
+; CHECK-NEXT: v_accvgpr_write_b32 a16, v16
+; CHECK-NEXT: v_accvgpr_write_b32 a17, v17
+; CHECK-NEXT: v_accvgpr_write_b32 a18, v18
+; CHECK-NEXT: v_accvgpr_write_b32 a19, v19
+; CHECK-NEXT: v_accvgpr_write_b32 a20, v20
+; CHECK-NEXT: v_accvgpr_write_b32 a21, v21
+; CHECK-NEXT: v_accvgpr_write_b32 a22, v22
+; CHECK-NEXT: v_accvgpr_write_b32 a23, v23
+; CHECK-NEXT: v_accvgpr_write_b32 a24, v24
+; CHECK-NEXT: v_accvgpr_write_b32 a25, v25
+; CHECK-NEXT: v_accvgpr_write_b32 a26, v26
+; CHECK-NEXT: v_accvgpr_write_b32 a27, v27
+; CHECK-NEXT: v_accvgpr_write_b32 a28, v28
+; CHECK-NEXT: v_accvgpr_write_b32 a29, v29
+; CHECK-NEXT: v_accvgpr_write_b32 a30, v30
+; CHECK-NEXT: v_accvgpr_write_b32 a31, v31
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:31]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> splat (float 2.0), i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+ ret void
+}
+
+; TODO: Handle rewriting this case
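+; The subregister extract of the result is not yet handled, so the
+; accumulator stays in VGPRs and only the extracted lanes are copied to
+; AGPRs.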
+define void @test_rewrite_mfma_subreg_extract0(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract0:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v2
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v4
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v5
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
+ ret void
+}
+
+define void @test_rewrite_mfma_subreg_extract1(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract1:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v6
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v7
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v8
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v9
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
+ ret void
+}
+
+; Odd offset: the extract starts at element 1, so it does not line up with an
+; aligned subregister.
+define void @test_rewrite_mfma_subreg_extract2(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_subreg_extract2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[30:33], v[2:3], off offset:112
+; CHECK-NEXT: global_load_dwordx4 v[26:29], v[2:3], off offset:96
+; CHECK-NEXT: global_load_dwordx4 v[22:25], v[2:3], off offset:80
+; CHECK-NEXT: global_load_dwordx4 v[18:21], v[2:3], off offset:64
+; CHECK-NEXT: global_load_dwordx4 v[14:17], v[2:3], off offset:48
+; CHECK-NEXT: global_load_dwordx4 v[10:13], v[2:3], off offset:32
+; CHECK-NEXT: global_load_dwordx4 v[6:9], v[2:3], off offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_load_dwordx4 v[2:5], v[2:3], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[2:33], v0, v1, v[2:33]
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 1
+; CHECK-NEXT: v_accvgpr_write_b32 a0, v3
+; CHECK-NEXT: v_accvgpr_write_b32 a1, v4
+; CHECK-NEXT: v_accvgpr_write_b32 a2, v5
+; CHECK-NEXT: v_accvgpr_write_b32 a3, v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ %extract.sub4 = shufflevector <32 x float> %mai, <32 x float> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ call void asm sideeffect "; use $0", "a"(<4 x float> %extract.sub4)
+ ret void
+}
+
+declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2
+declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #3
attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" }
attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
diff --git a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll b/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll
deleted file mode 100644
index 726e35d..0000000
--- a/llvm/test/CodeGen/AMDGPU/test_isel_single_lane.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=GCN %s
-
-declare i32 @llvm.amdgcn.atomic.cond.sub.u32.p1(ptr addrspace(1), i32)
-
-
-define amdgpu_kernel void @test_isel_single_lane(ptr addrspace(1) %in, ptr addrspace(1) %out) #0 {
-; GCN-LABEL: test_isel_single_lane:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: s_load_b32 s4, s[0:1], 0x58
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GCN-NEXT: global_atomic_cond_sub_u32 v1, v0, v1, s[0:1] offset:16 th:TH_ATOMIC_RETURN
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_readfirstlane_b32 s0, v1
-; GCN-NEXT: s_addk_co_i32 s0, 0xf4
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GCN-NEXT: s_lshl_b32 s1, s0, 4
-; GCN-NEXT: s_mul_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GCN-NEXT: s_lshl_b32 s0, s0, 12
-; GCN-NEXT: s_sub_co_i32 s0, s1, s0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_mov_b32_e32 v1, s0
-; GCN-NEXT: global_store_b32 v0, v1, s[2:3]
-; GCN-NEXT: s_endpgm
- %gep0 = getelementptr i32, ptr addrspace(1) %in, i32 22
- %val0 = load i32, ptr addrspace(1) %gep0, align 4
- %gep1 = getelementptr i32, ptr addrspace(1) %in, i32 4
- %val1 = call i32 @llvm.amdgcn.atomic.cond.sub.u32.p0(ptr addrspace(1) %gep1, i32 %val0)
- %res0 = add i32 %val1, 244
- %res1 = shl i32 %res0, 4
- %res2 = mul i32 %res0, %res1
- %res3 = shl i32 %res2, 12
- %res4 = sub i32 %res1, %res3
- store i32 %res4, ptr addrspace(1) %out
- ret void
-}
-
-
-attributes #0 = {
- "amdgpu-flat-work-group-size"="1,1"
- "amdgpu-waves-per-eu"="1,1"
- "uniform-work-group-size"="true"
-}
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir
index 350b233..ceb1b3e 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.mir
+++ b/llvm/test/CodeGen/AMDGPU/wqm.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-wqm -o - %s | FileCheck %s
# RUN: llc -mtriple=amdgcn -mcpu=fiji -passes=si-wqm -o - %s | FileCheck %s
@@ -46,10 +47,6 @@
---
# Check for awareness that s_or_saveexec_b64 clobbers SCC
-#
-#CHECK: ENTER_STRICT_WWM
-#CHECK: S_CMP_LT_I32
-#CHECK: S_CSELECT_B32
name: test_strict_wwm_scc
alignment: 1
exposesReturnsTwice: false
@@ -80,6 +77,21 @@ body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-LABEL: name: test_strict_wwm_scc
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0
%3 = COPY $vgpr0
%2 = COPY $sgpr2
%1 = COPY $sgpr1
@@ -96,16 +108,35 @@ body: |
---
# Second test for awareness that s_or_saveexec_b64 clobbers SCC,
# because the entry block is treated differently.
-#
-#CHECK: %bb.1
-#CHECK: S_CMP_LT_I32
-#CHECK: COPY $scc
-#CHECK: ENTER_STRICT_WWM
-#CHECK: $scc = COPY
-#CHECK: S_CSELECT_B32
name: test_strict_wwm_scc2
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: test_strict_wwm_scc2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: S_CMP_LT_I32 0, [[COPY3]], implicit-def $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY]], [[DEF]], 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM1:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: $scc = COPY [[COPY4]]
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[COPY]], [[COPY]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: [[S_CSELECT_B32_:%[0-9]+]]:sgpr_32 = S_CSELECT_B32 [[COPY1]], [[COPY2]], implicit $scc
+ ; CHECK-NEXT: [[V_ADD_CO_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 [[S_CSELECT_B32_]], [[V_ADD_CO_U32_e32_]], implicit-def $vcc, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM1]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_ADD_CO_U32_e32_1]], implicit $exec
+ ; CHECK-NEXT: $vgpr1 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
@@ -130,7 +161,6 @@ body: |
---
# V_SET_INACTIVE, when its second operand is undef, is replaced by a
# COPY by si-wqm. Ensure the instruction is removed.
-#CHECK-NOT: V_SET_INACTIVE
name: no_cfg
alignment: 1
exposesReturnsTwice: false
@@ -167,6 +197,28 @@ body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: no_cfg
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:sgpr_128 = COPY [[REG_SEQUENCE]]
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFSET]].sub1
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY8]], [[COPY7]], 323, 12, 15, 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber %15:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_dpp]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %15, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -189,18 +241,32 @@ body: |
---
# Ensure that strict_wwm is not put around an EXEC copy
-#CHECK-LABEL: name: copy_exec
-#CHECK: %7:sreg_64 = COPY $exec
-#CHECK-NEXT: %13:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
-#CHECK-NEXT: %8:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-#CHECK-NEXT: $exec = EXIT_STRICT_WWM %13
-#CHECK-NEXT: %9:vgpr_32 = V_MBCNT_LO_U32_B32_e64 %7.sub0, 0, implicit $exec
name: copy_exec
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-LABEL: name: copy_exec
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY1]], %subreg.sub2, [[COPY]], %subreg.sub3
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: dead [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64 = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY4]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[V_MOV_B32_e32_]], [[V_MBCNT_LO_U32_B32_e64_]], 312, 15, 15, 0, implicit $exec
+ ; CHECK-NEXT: dead [[V_READLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READLANE_B32 [[V_MOV_B32_dpp]], 63
+ ; CHECK-NEXT: early-clobber %12:vgpr_32 = V_MOV_B32_e32 [[V_MOV_B32_e32_]], implicit $exec
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact %12, [[REG_SEQUENCE]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
%3:sgpr_32 = COPY $sgpr3
%2:sgpr_32 = COPY $sgpr2
%1:sgpr_32 = COPY $sgpr1
@@ -224,20 +290,48 @@ body: |
---
# Check that the exit of WQM is still inserted correctly when SCC is live until block end.
# Critically, this tests that compilation does not fail.
-#CHECK-LABEL: name: scc_always_live
-#CHECK: %8:vreg_128 = IMAGE_SAMPLE_V4_V2 %7
-#CHECK-NEXT: S_CMP_EQ_U32 %2, 0, implicit-def $scc
-#CHECK-NEXT: undef %9.sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %9.sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32
-#CHECK-NEXT: %14:sreg_32_xm0 = COPY $scc
-#CHECK-NEXT: $exec = S_AND_B64 $exec, %13, implicit-def $scc
-#CHECK-NEXT: $scc = COPY %14
-#CHECK-NEXT: %10:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64
-#CHECK-NEXT: %11:vreg_128 = IMAGE_SAMPLE_V4_V2
-#CHECK-NEXT: S_CBRANCH_SCC0 %bb.2
name: scc_always_live
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: scc_always_live
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $m0 = COPY $sgpr1
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY1]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: [[V_INTERP_P1_F32_1:%[0-9]+]]:vgpr_32 = V_INTERP_P1_F32 [[COPY2]], 3, 2, implicit $mode, implicit $m0, implicit $exec
+ ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub0:vreg_64 = COPY [[V_INTERP_P1_F32_]]
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]].sub1:vreg_64 = COPY [[V_INTERP_P1_F32_1]]
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY4]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CMP_EQ_U32 [[COPY3]], 0, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_ADD_F32_e64_:%[0-9]+]].sub0:vreg_64 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[IMAGE_SAMPLE_V4_V2_]].sub0, 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]].sub1:vreg_64 = nsz arcp nofpexcept V_MUL_F32_e32 [[V_INTERP_P1_F32_]], [[V_INTERP_P1_F32_1]], implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY $scc
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: $scc = COPY [[COPY5]]
+ ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nsz arcp nofpexcept V_ADD_F32_e64 0, [[V_INTERP_P1_F32_]], 0, [[V_INTERP_P1_F32_1]], 1, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_1:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_ADD_F32_e64_]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
+ ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET_exact [[V_ADD_F32_e64_1]], [[DEF1]], [[S_MOV_B32_]], 4, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: $vgpr2 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub0
+ ; CHECK-NEXT: $vgpr3 = COPY [[IMAGE_SAMPLE_V4_V2_1]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1, $vgpr2, $vgpr3
bb.0:
liveins: $sgpr1, $sgpr2, $vgpr1, $vgpr2
@@ -281,18 +375,26 @@ body: |
---
# Check that unnecessary instructions do not get marked for WWM
#
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: BUFFER_LOAD_DWORDX2
-#CHECK: ENTER_STRICT_WWM
-#CHECK: V_SET_INACTIVE_B32
-#CHECK: V_SET_INACTIVE_B32
-#CHECK-NOT: ENTER_STRICT_WWM
-#CHECK: V_MAX
name: test_wwm_set_inactive_propagation
tracksRegLiveness: true
body: |
bb.0:
liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-LABEL: name: test_wwm_set_inactive_propagation
+ ; CHECK: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY1]], [[COPY]], 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[ENTER_STRICT_WWM:%[0-9]+]]:sreg_64_xexec = ENTER_STRICT_WWM -1, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: dead [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub0:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]].sub1:vreg_64 = V_SET_INACTIVE_B32 0, [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1, 0, 0, undef [[ENTER_STRICT_WWM]], implicit $exec, implicit-def $scc
+ ; CHECK-NEXT: [[V_MAX_F64_e64_:%[0-9]+]]:vreg_64 = nnan nsz arcp contract reassoc nofpexcept V_MAX_F64_e64 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, [[BUFFER_LOAD_DWORDX2_OFFEN]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $exec = EXIT_STRICT_WWM [[ENTER_STRICT_WWM]]
+ ; CHECK-NEXT: early-clobber $vgpr0 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr1 = V_MOV_B32_e32 [[V_MAX_F64_e64_]].sub1, implicit $exec
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
%0:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
%1:vgpr_32 = COPY $vgpr0
%2:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN %1:vgpr_32, %0:sgpr_128, 0, 0, 0, 0, implicit $exec
@@ -308,15 +410,46 @@ body: |
---
# Check that WQM marking occurs correctly through phi nodes in the live range graph.
# If not, the initial V_MOV will not be in WQM.
-#
-#CHECK-LABEL: name: test_wqm_lr_phi
-#CHECK: COPY $exec
-#CHECK-NEXT: S_WQM
-#CHECK-NEXT: V_MOV_B32_e32 -10
-#CHECK-NEXT: V_MOV_B32_e32 0
name: test_wqm_lr_phi
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: test_wqm_lr_phi
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
+ ; CHECK-NEXT: $exec = S_WQM_B64 $exec, implicit-def $scc
+ ; CHECK-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[S_GETPC_B64_:%[0-9]+]]:sreg_64 = S_GETPC_B64
+ ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM [[S_GETPC_B64_]], 32, 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $vcc = V_CMP_LT_U32_e64 4, 4, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_ADD_U32_e32 1, [[V_MOV_B32_e32_]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: $exec = S_AND_B64 $exec, [[COPY]], implicit-def $scc
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX8_IMM]], [[DEF]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), addrspace 7)
+ ; CHECK-NEXT: $vgpr0 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub0
+ ; CHECK-NEXT: $vgpr1 = COPY [[IMAGE_SAMPLE_V4_V2_]].sub1
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG $vgpr0, $vgpr1
bb.0:
undef %0.sub0:vreg_64 = V_MOV_B32_e32 -10, implicit $exec
%0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
@@ -345,14 +478,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_cs
-#CHECK-NOT: S_WQM
name: no_wqm_in_cs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_cs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -362,14 +501,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_es
-#CHECK-NOT: S_WQM
name: no_wqm_in_es
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_es
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -379,14 +524,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_gs
-#CHECK-NOT: S_WQM
name: no_wqm_in_gs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_gs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -396,14 +547,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_hs
-#CHECK-NOT: S_WQM
name: no_wqm_in_hs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_hs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -413,14 +570,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_ls
-#CHECK-NOT: S_WQM
name: no_wqm_in_ls
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_ls
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
@@ -430,14 +593,20 @@ body: |
...
---
-#CHECK-LABEL: name: no_wqm_in_vs
-#CHECK-NOT: S_WQM
name: no_wqm_in_vs
tracksRegLiveness: true
body: |
bb.0:
liveins: $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: no_wqm_in_vs
+ ; CHECK: liveins: $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY $vgpr2
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sgpr_256 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: dead [[IMAGE_SAMPLE_V4_V2_:%[0-9]+]]:vreg_128 = IMAGE_SAMPLE_V4_V2 [[COPY]], [[DEF]], [[DEF1]], 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128), align 4, addrspace 4)
undef %0.sub0:vreg_64 = COPY $vgpr1
%0.sub1:vreg_64 = COPY $vgpr2
%100:sgpr_256 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/ARM/scmp.ll b/llvm/test/CodeGen/ARM/scmp.ll
index 6e493c9..9189aee 100644
--- a/llvm/test/CodeGen/ARM/scmp.ll
+++ b/llvm/test/CodeGen/ARM/scmp.ll
@@ -4,12 +4,9 @@
define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
; CHECK-LABEL: scmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
; CHECK-LABEL: scmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
define i8 @scmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.scmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,17 +83,26 @@ define i8 @scmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @scmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlt r0, #1
-; CHECK-NEXT: movwgt r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
}
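+; The negated operand folds into the compare, turning the subs of scmp_32_32
+; into adds.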
+define i32 @scmp_neg(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: scmp_neg:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: adds r0, r0, r1
+; CHECK-NEXT: movwgt r0, #1
+; CHECK-NEXT: mvnlt r0, #0
+; CHECK-NEXT: bx lr
+ %yy = sub nsw i32 0, %y
+ %1 = call i32 @llvm.scmp(i32 %x, i32 %yy)
+ ret i32 %1
+}
+
define i32 @scmp_32_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: scmp_32_64:
; CHECK: @ %bb.0:
diff --git a/llvm/test/CodeGen/ARM/ucmp.ll b/llvm/test/CodeGen/ARM/ucmp.ll
index ad4af53..bb02014 100644
--- a/llvm/test/CodeGen/ARM/ucmp.ll
+++ b/llvm/test/CodeGen/ARM/ucmp.ll
@@ -4,12 +4,9 @@
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_8:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +15,9 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,12 +26,9 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_8_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
@@ -92,12 +83,9 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_32_32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov r0, #0
-; CHECK-NEXT: mov r2, #0
-; CHECK-NEXT: movwlo r0, #1
-; CHECK-NEXT: movwhi r2, #1
-; CHECK-NEXT: sub r0, r2, r0
+; CHECK-NEXT: subs r0, r0, r1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: mvnlo r0, #0
; CHECK-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
diff --git a/llvm/test/CodeGen/BPF/loop-exit-cond.ll b/llvm/test/CodeGen/BPF/loop-exit-cond.ll
index 69fe714..fa6a4a0 100644
--- a/llvm/test/CodeGen/BPF/loop-exit-cond.ll
+++ b/llvm/test/CodeGen/BPF/loop-exit-cond.ll
@@ -35,14 +35,14 @@ define dso_local i32 @test(i32 %len, ptr %data) #0 {
; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY:%.*]], label [[IF_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_05:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 1, [[ENTRY:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[D]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[DATA]], align 1, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT: [[NARROW:%.*]] = select i1 [[TOBOOL_NOT]], i8 48, i8 [[TMP1]]
; CHECK-NEXT: [[CONV2:%.*]] = sext i8 [[NARROW]] to i64
; CHECK-NEXT: store i64 [[CONV2]], ptr [[D]], align 8, !tbaa [[TBAA6:![0-9]+]]
; CHECK-NEXT: call void @foo(ptr nonnull @.str, i32 [[I_05]], ptr nonnull [[D]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[D]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[D]]) #[[ATTR3]]
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_05]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]]
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -61,7 +61,7 @@ entry:
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 1, ptr %i, align 4, !tbaa !3
br label %for.cond
@@ -73,11 +73,11 @@ for.cond: ; preds = %for.inc, %if.then
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %d) #3
+ call void @llvm.lifetime.start.p0(ptr %d) #3
%3 = load ptr, ptr %data.addr, align 8, !tbaa !7
%4 = load i8, ptr %3, align 1, !tbaa !9
%conv = sext i8 %4 to i32
@@ -96,7 +96,7 @@ cond.end: ; preds = %cond.false, %cond.t
store i64 %conv2, ptr %d, align 8, !tbaa !10
%5 = load i32, ptr %i, align 4, !tbaa !3
call void @foo(ptr @.str, i32 %5, ptr %d)
- call void @llvm.lifetime.end.p0(i64 8, ptr %d) #3
+ call void @llvm.lifetime.end.p0(ptr %d) #3
br label %for.inc
for.inc: ; preds = %cond.end
@@ -113,12 +113,12 @@ if.end: ; preds = %for.end, %entry
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare dso_local void @foo(ptr, i32, ptr) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/CodeGen/BPF/vla.ll b/llvm/test/CodeGen/BPF/vla.ll
index 9a22769..708b41e 100644
--- a/llvm/test/CodeGen/BPF/vla.ll
+++ b/llvm/test/CodeGen/BPF/vla.ll
@@ -33,17 +33,17 @@ define dso_local i32 @test1() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 8, ptr [[A]], align 4
; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 68, align 1
; CHECK-NEXT: call void @foo(ptr [[VLA]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
entry:
%a = alloca i32, align 4
%saved_stack = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 8, ptr %a, align 4
%0 = call ptr @llvm.stacksave()
store ptr %0, ptr %saved_stack, align 8
@@ -51,11 +51,11 @@ entry:
call void @foo(ptr %vla)
%1 = load ptr, ptr %saved_stack, align 8
call void @llvm.stackrestore(ptr %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare ptr @llvm.stacksave()
@@ -63,7 +63,7 @@ declare dso_local void @foo(ptr)
declare void @llvm.stackrestore(ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define dso_local i32 @test2(i32 %b) {
; CHECK-LABEL: @test2(
@@ -73,7 +73,7 @@ define dso_local i32 @test2(i32 %b) {
; CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
; CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 8, ptr [[A]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 8, [[TMP1]]
@@ -81,7 +81,7 @@ define dso_local i32 @test2(i32 %b) {
; CHECK-NEXT: [[VLA:%.*]] = alloca i8, i64 [[TMP2]], align 1
; CHECK-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8
; CHECK-NEXT: call void @foo(ptr [[VLA]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
entry:
@@ -90,7 +90,7 @@ entry:
%saved_stack = alloca ptr, align 8
%__vla_expr0 = alloca i64, align 8
store i32 %b, ptr %b.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 8, ptr %a, align 4
%0 = load i32, ptr %b.addr, align 4
%add = add nsw i32 8, %0
@@ -102,6 +102,6 @@ entry:
call void @foo(ptr %vla)
%3 = load ptr, ptr %saved_stack, align 8
call void @llvm.stackrestore(ptr %3)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 0
}
diff --git a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
index 4265553..9c564ff 100644
--- a/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
+++ b/llvm/test/CodeGen/NVPTX/frameindex-lifetime.ll
@@ -44,8 +44,8 @@ declare void @bar(ptr)
define void @foo() {
%p = alloca i32
- call void @llvm.lifetime.start(i64 4, ptr %p)
+ call void @llvm.lifetime.start(ptr %p)
call void @bar(ptr %p)
- call void @llvm.lifetime.end(i64 4, ptr %p)
+ call void @llvm.lifetime.end(ptr %p)
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
index 5502980..1d69f8d 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-lowering.ll
@@ -119,7 +119,7 @@ define dso_local i32 @foo() {
; CHECK-NEXT: [[CONV:%.*]] = sext i8 1 to i32
; CHECK-NEXT: [[CONV1:%.*]] = sext i16 1 to i32
; CHECK-NEXT: [[CONV2:%.*]] = fpext float 1.000000e+00 to double
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[CONV]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -133,7 +133,7 @@ define dso_local i32 @foo() {
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[FOO_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 6
; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP5]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics1(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -208,7 +208,7 @@ define dso_local i32 @bar() {
; CHECK-NEXT: [[S1_SROA_2_0_COPYLOAD:%.*]] = load i8, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 4), align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1_SROA_3]], ptr align 1 getelementptr inbounds (i8, ptr @__const.bar.s1, i64 5), i64 3, i1 false)
; CHECK-NEXT: [[S1_SROA_31_0_COPYLOAD:%.*]] = load i64, ptr getelementptr inbounds (i8, ptr @__const.bar.s1, i64 8), align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i32 [[S1_SROA_0_0_COPYLOAD]], ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -216,7 +216,7 @@ define dso_local i32 @bar() {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[BAR_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; CHECK-NEXT: store i64 [[S1_SROA_31_0_COPYLOAD]], ptr [[TMP2]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics2(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -274,11 +274,11 @@ define dso_local i32 @baz() {
; CHECK-LABEL: define dso_local i32 @baz() {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[BAZ_VARARG:%.*]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[BAZ_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store <4 x i32> splat (i32 1), ptr [[TMP0]], align 16
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics3(i32 noundef 1, ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret i32 [[CALL]]
;
entry:
@@ -333,11 +333,11 @@ define dso_local void @qux() {
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_S2:%.*]], align 8
; CHECK-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[QUX_VARARG:%.*]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S]], ptr align 8 @__const.qux.s, i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[QUX_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; CHECK-NEXT: store i64 1, ptr [[TMP0]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call i32 @variadics4(ptr noundef byval([[STRUCT_S2]]) align 8 [[S]], ptr [[VARARG_BUFFER]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; CHECK-NEXT: ret void
;
entry:
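; A condensed sketch of the lowering shape the CHECK lines above verify,
; with illustrative names (%example.vararg and @callee are placeholders,
; not from this patch): each variadic call site gets a stack buffer, the
; promoted arguments are stored into its fields, and lifetime markers
; bracket the rewritten call.
%example.vararg = type { i32, double }
define void @example_caller(i32 %x, double %y) {
  %buf = alloca %example.vararg, align 16
  call void @llvm.lifetime.start.p0(ptr %buf)
  %f0 = getelementptr inbounds nuw %example.vararg, ptr %buf, i32 0, i32 0
  store i32 %x, ptr %f0, align 4
  %f1 = getelementptr inbounds nuw %example.vararg, ptr %buf, i32 0, i32 1
  store double %y, ptr %f1, align 8
  call void @callee(i32 noundef 1, ptr %buf)
  call void @llvm.lifetime.end.p0(ptr %buf)
  ret void
}
declare void @callee(i32 noundef, ptr)
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)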
diff --git a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
index 41e2124..2796cdb 100644
--- a/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
+++ b/llvm/test/CodeGen/PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir
@@ -1,6 +1,12 @@
# RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+# RUN: llc -mcpu=pwr10 -mtriple=powerpc64le-unknown-linux-gnu -start-after \
+# RUN: virtregrewriter -ppc-asm-full-reg-names -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
--- |
; ModuleID = 'a.ll'
@@ -30,7 +36,7 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
- attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
index 483d707..3d93eca 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -17,11 +17,11 @@
; CL: OpFunction
; CL: %[[#FooVar:]] = OpVariable
; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]]
-; CL-NEXT: OpLifetimeStart %[[#Casted1]], 72
+; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16
; CL-NEXT: OpBitcast
; CL-NEXT: OpInBoundsPtrAccessChain
; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#FooVar]]
-; CL-NEXT: OpLifetimeStop %[[#Casted2]], 72
+; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16
; VK: OpFunction
; VK: %[[#FooVar:]] = OpVariable
@@ -29,18 +29,20 @@
; VK-NEXT: OpReturn
define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
%RoundedRangeKernel = alloca %tprange, align 8
- call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel)
%KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
- call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel)
ret void
}
; CL: OpFunction
; CL: %[[#BarVar:]] = OpVariable
-; CL-NEXT: OpLifetimeStart %[[#BarVar]], 0
+; CL-NEXT: %[[#Casted1:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]]
+; CL-NEXT: OpLifetimeStart %[[#Casted1]], 16
; CL-NEXT: OpBitcast
; CL-NEXT: OpInBoundsPtrAccessChain
-; CL-NEXT: OpLifetimeStop %[[#BarVar]], 0
+; CL-NEXT: %[[#Casted2:]] = OpBitcast %[[#PtrChar]] %[[#BarVar]]
+; CL-NEXT: OpLifetimeStop %[[#Casted2]], 16
; VK: OpFunction
; VK: %[[#BarVar:]] = OpVariable
@@ -48,9 +50,9 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
; VK-NEXT: OpReturn
define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange) {
%RoundedRangeKernel = alloca %tprange, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.start.p0(ptr nonnull %RoundedRangeKernel)
%KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
+ call void @llvm.lifetime.end.p0(ptr nonnull %RoundedRangeKernel)
ret void
}
@@ -66,12 +68,12 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
; VK-NEXT: OpReturn
define spir_func void @test(ptr noundef align 8 %_arg) {
%var = alloca i8, align 8
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %var)
+ call void @llvm.lifetime.start.p0(ptr nonnull %var)
%KernelFunc = getelementptr inbounds i8, ptr %var, i64 1
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %var)
+ call void @llvm.lifetime.end.p0(ptr nonnull %var)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/CodeGen/Thumb/scmp.ll b/llvm/test/CodeGen/Thumb/scmp.ll
index 661dbe9..c002449 100644
--- a/llvm/test/CodeGen/Thumb/scmp.ll
+++ b/llvm/test/CodeGen/Thumb/scmp.ll
@@ -1,151 +1,420 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1
+; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2
+; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M
define i8 @scmp_8_8(i8 signext %x, i8 signext %y) nounwind {
-; CHECK-LABEL: scmp_8_8:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_8:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB0_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB0_4
+; THUMB1-NEXT: .LBB0_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB0_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB0_2
+; THUMB1-NEXT: .LBB0_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_8:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_8:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i8 %x, i8 %y)
ret i8 %1
}
define i8 @scmp_8_16(i16 signext %x, i16 signext %y) nounwind {
-; CHECK-LABEL: scmp_8_16:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_16:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB1_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB1_4
+; THUMB1-NEXT: .LBB1_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB1_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB1_2
+; THUMB1-NEXT: .LBB1_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_16:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_16:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i16 %x, i16 %y)
ret i8 %1
}
define i8 @scmp_8_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scmp_8_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB2_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB2_4
+; THUMB1-NEXT: .LBB2_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB2_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB2_2
+; THUMB1-NEXT: .LBB2_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_8_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i32 %x, i32 %y)
ret i8 %1
}
define i8 @scmp_8_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_8_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_8_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB3_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB3_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB3_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB3_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_8_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_8_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.scmp(i64 %x, i64 %y)
ret i8 %1
}
define i8 @scmp_8_128(i128 %x, i128 %y) nounwind {
-; CHECK-LABEL: scmp_8_128:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: add.w lr, sp, #16
-; CHECK-NEXT: ldr r4, [sp, #28]
-; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: ldm.w lr, {r9, r12, lr}
-; CHECK-NEXT: subs.w r6, r0, r9
-; CHECK-NEXT: sbcs.w r6, r1, r12
-; CHECK-NEXT: sbcs.w r6, r2, lr
-; CHECK-NEXT: sbcs.w r6, r3, r4
-; CHECK-NEXT: mov.w r6, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r6, #1
-; CHECK-NEXT: subs.w r0, r9, r0
-; CHECK-NEXT: sbcs.w r0, r12, r1
-; CHECK-NEXT: sbcs.w r0, lr, r2
-; CHECK-NEXT: sbcs.w r0, r4, r3
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r5, #1
-; CHECK-NEXT: subs r0, r5, r6
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; THUMB1-LABEL: scmp_8_128:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: .pad #20
+; THUMB1-NEXT: sub sp, #20
+; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #1
+; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; THUMB1-NEXT: ldr r6, [sp, #52]
+; THUMB1-NEXT: add r7, sp, #40
+; THUMB1-NEXT: ldm r7, {r3, r5, r7}
+; THUMB1-NEXT: subs r4, r0, r3
+; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r1
+; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r5
+; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r2
+; THUMB1-NEXT: sbcs r4, r7
+; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r6
+; THUMB1-NEXT: mov r2, r1
+; THUMB1-NEXT: blt .LBB4_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_2:
+; THUMB1-NEXT: subs r0, r3, r0
+; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r5, r0
+; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r7, r0
+; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r6, r0
+; THUMB1-NEXT: blt .LBB4_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_4:
+; THUMB1-NEXT: subs r0, r1, r2
+; THUMB1-NEXT: add sp, #20
+; THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; THUMB2-LABEL: scmp_8_128:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: push {r4, r5, r6, lr}
+; THUMB2-NEXT: add.w lr, sp, #16
+; THUMB2-NEXT: ldr r4, [sp, #28]
+; THUMB2-NEXT: movs r5, #0
+; THUMB2-NEXT: ldm.w lr, {r9, r12, lr}
+; THUMB2-NEXT: subs.w r6, r0, r9
+; THUMB2-NEXT: sbcs.w r6, r1, r12
+; THUMB2-NEXT: sbcs.w r6, r2, lr
+; THUMB2-NEXT: sbcs.w r6, r3, r4
+; THUMB2-NEXT: mov.w r6, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt r6, #1
+; THUMB2-NEXT: subs.w r0, r9, r0
+; THUMB2-NEXT: sbcs.w r0, r12, r1
+; THUMB2-NEXT: sbcs.w r0, lr, r2
+; THUMB2-NEXT: sbcs.w r0, r4, r3
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt r5, #1
+; THUMB2-NEXT: subs r0, r5, r6
+; THUMB2-NEXT: pop {r4, r5, r6, pc}
+;
+; V81M-LABEL: scmp_8_128:
+; V81M: @ %bb.0:
+; V81M-NEXT: .save {r4, r5, r6, lr}
+; V81M-NEXT: push {r4, r5, r6, lr}
+; V81M-NEXT: ldrd r5, r4, [sp, #16]
+; V81M-NEXT: ldrd lr, r12, [sp, #24]
+; V81M-NEXT: subs r6, r0, r5
+; V81M-NEXT: sbcs.w r6, r1, r4
+; V81M-NEXT: sbcs.w r6, r2, lr
+; V81M-NEXT: sbcs.w r6, r3, r12
+; V81M-NEXT: cset r6, lt
+; V81M-NEXT: subs r0, r5, r0
+; V81M-NEXT: sbcs.w r0, r4, r1
+; V81M-NEXT: sbcs.w r0, lr, r2
+; V81M-NEXT: sbcs.w r0, r12, r3
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: subs r0, r0, r6
+; V81M-NEXT: pop {r4, r5, r6, pc}
%1 = call i8 @llvm.scmp(i128 %x, i128 %y)
ret i8 %1
}
define i32 @scmp_32_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scmp_32_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: it gt
-; CHECK-NEXT: movgt r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_32_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: movs r2, #1
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: cmp r0, r1
+; THUMB1-NEXT: mov r0, r2
+; THUMB1-NEXT: bge .LBB5_3
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ble .LBB5_4
+; THUMB1-NEXT: .LBB5_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .LBB5_3:
+; THUMB1-NEXT: mov r0, r3
+; THUMB1-NEXT: bgt .LBB5_2
+; THUMB1-NEXT: .LBB5_4:
+; THUMB1-NEXT: mov r2, r3
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: scmp_32_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it gt
+; THUMB2-NEXT: movgt r0, #1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_32_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, gt
+; V81M-NEXT: it lt
+; V81M-NEXT: movlt.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
}
define i32 @scmp_32_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_32_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_32_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB6_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB6_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB6_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB6_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_32_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_32_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.scmp(i64 %x, i64 %y)
ret i32 %1
}
define i64 @scmp_64_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scmp_64_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: asrs r1, r0, #31
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: scmp_64_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blt .LBB7_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB7_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blt .LBB7_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB7_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: asrs r1, r0, #31
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: scmp_64_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lt
+; THUMB2-NEXT: movlt.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: asrs r1, r0, #31
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: scmp_64_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lt
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lt
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: asrs r1, r0, #31
+; V81M-NEXT: bx lr
%1 = call i64 @llvm.scmp(i64 %x, i64 %y)
ret i64 %1
}
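; A reference expansion for the semantics being code-generated above (a
; sketch, not part of the patch): llvm.scmp returns -1, 0, or 1 for a
; signed x < y, x == y, or x > y, and the result width is independent of
; the operand width. The gt/lt subtraction below is the same pattern the
; THUMB1 and THUMB2 sequences implement.
define i8 @scmp_reference(i32 %x, i32 %y) {
  %lt = icmp slt i32 %x, %y
  %gt = icmp sgt i32 %x, %y
  %a = zext i1 %gt to i8
  %b = zext i1 %lt to i8
  %r = sub i8 %a, %b                 ; yields 1, 0, or -1
  ret i8 %r
}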
diff --git a/llvm/test/CodeGen/Thumb/ucmp.ll b/llvm/test/CodeGen/Thumb/ucmp.ll
index 7e6d0a3..5d0f57e 100644
--- a/llvm/test/CodeGen/Thumb/ucmp.ll
+++ b/llvm/test/CodeGen/Thumb/ucmp.ll
@@ -1,151 +1,376 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-eabi %s -o - | FileCheck %s -check-prefix=THUMB1
+; RUN: llc -mtriple=thumbv7-apple-darwin %s -o - | FileCheck %s -check-prefix=THUMB2
+; RUN: llc -mtriple thumbv8.1m.main-none-eabi -o - %s | FileCheck %s --check-prefix=V81M
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
-; CHECK-LABEL: ucmp_8_8:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_8:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_8:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_8:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
}
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
-; CHECK-LABEL: ucmp_8_16:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_16:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_16:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_16:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
}
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: ucmp_8_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_8_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
}
define i8 @ucmp_8_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_8_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_8_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB3_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB3_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB3_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB3_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_8_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_8_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
ret i8 %1
}
define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
-; CHECK-LABEL: ucmp_8_128:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: add.w lr, sp, #16
-; CHECK-NEXT: ldr r4, [sp, #28]
-; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: ldm.w lr, {r9, r12, lr}
-; CHECK-NEXT: subs.w r6, r0, r9
-; CHECK-NEXT: sbcs.w r6, r1, r12
-; CHECK-NEXT: sbcs.w r6, r2, lr
-; CHECK-NEXT: sbcs.w r6, r3, r4
-; CHECK-NEXT: mov.w r6, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r6, #1
-; CHECK-NEXT: subs.w r0, r9, r0
-; CHECK-NEXT: sbcs.w r0, r12, r1
-; CHECK-NEXT: sbcs.w r0, lr, r2
-; CHECK-NEXT: sbcs.w r0, r4, r3
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r5, #1
-; CHECK-NEXT: subs r0, r5, r6
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; THUMB1-LABEL: ucmp_8_128:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: push {r4, r5, r6, r7, lr}
+; THUMB1-NEXT: .pad #20
+; THUMB1-NEXT: sub sp, #20
+; THUMB1-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #1
+; THUMB1-NEXT: str r3, [sp] @ 4-byte Spill
+; THUMB1-NEXT: movs r3, #0
+; THUMB1-NEXT: str r3, [sp, #12] @ 4-byte Spill
+; THUMB1-NEXT: ldr r6, [sp, #52]
+; THUMB1-NEXT: add r7, sp, #40
+; THUMB1-NEXT: ldm r7, {r3, r5, r7}
+; THUMB1-NEXT: subs r4, r0, r3
+; THUMB1-NEXT: str r1, [sp, #4] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r1
+; THUMB1-NEXT: ldr r1, [sp] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r5
+; THUMB1-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; THUMB1-NEXT: mov r4, r2
+; THUMB1-NEXT: sbcs r4, r7
+; THUMB1-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r4, r6
+; THUMB1-NEXT: mov r2, r1
+; THUMB1-NEXT: blo .LBB4_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_2:
+; THUMB1-NEXT: subs r0, r3, r0
+; THUMB1-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r5, r0
+; THUMB1-NEXT: ldr r0, [sp, #8] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r7, r0
+; THUMB1-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; THUMB1-NEXT: sbcs r6, r0
+; THUMB1-NEXT: blo .LBB4_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; THUMB1-NEXT: .LBB4_4:
+; THUMB1-NEXT: subs r0, r1, r2
+; THUMB1-NEXT: add sp, #20
+; THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; THUMB2-LABEL: ucmp_8_128:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: push {r4, r5, r6, lr}
+; THUMB2-NEXT: add.w lr, sp, #16
+; THUMB2-NEXT: ldr r4, [sp, #28]
+; THUMB2-NEXT: movs r5, #0
+; THUMB2-NEXT: ldm.w lr, {r9, r12, lr}
+; THUMB2-NEXT: subs.w r6, r0, r9
+; THUMB2-NEXT: sbcs.w r6, r1, r12
+; THUMB2-NEXT: sbcs.w r6, r2, lr
+; THUMB2-NEXT: sbcs.w r6, r3, r4
+; THUMB2-NEXT: mov.w r6, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo r6, #1
+; THUMB2-NEXT: subs.w r0, r9, r0
+; THUMB2-NEXT: sbcs.w r0, r12, r1
+; THUMB2-NEXT: sbcs.w r0, lr, r2
+; THUMB2-NEXT: sbcs.w r0, r4, r3
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo r5, #1
+; THUMB2-NEXT: subs r0, r5, r6
+; THUMB2-NEXT: pop {r4, r5, r6, pc}
+;
+; V81M-LABEL: ucmp_8_128:
+; V81M: @ %bb.0:
+; V81M-NEXT: .save {r4, r5, r6, lr}
+; V81M-NEXT: push {r4, r5, r6, lr}
+; V81M-NEXT: ldrd r5, r4, [sp, #16]
+; V81M-NEXT: ldrd lr, r12, [sp, #24]
+; V81M-NEXT: subs r6, r0, r5
+; V81M-NEXT: sbcs.w r6, r1, r4
+; V81M-NEXT: sbcs.w r6, r2, lr
+; V81M-NEXT: sbcs.w r6, r3, r12
+; V81M-NEXT: cset r6, lo
+; V81M-NEXT: subs r0, r5, r0
+; V81M-NEXT: sbcs.w r0, r4, r1
+; V81M-NEXT: sbcs.w r0, lr, r2
+; V81M-NEXT: sbcs.w r0, r12, r3
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: subs r0, r0, r6
+; V81M-NEXT: pop {r4, r5, r6, pc}
%1 = call i8 @llvm.ucmp(i128 %x, i128 %y)
ret i8 %1
}
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: ucmp_32_32:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: cmp r0, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: mov.w r2, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo r0, #1
-; CHECK-NEXT: it hi
-; CHECK-NEXT: movhi r2, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_32_32:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: subs r2, r0, r1
+; THUMB1-NEXT: sbcs r2, r2
+; THUMB1-NEXT: cmp r1, r0
+; THUMB1-NEXT: sbcs r1, r1
+; THUMB1-NEXT: subs r0, r2, r1
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: ucmp_32_32:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs r0, r0, r1
+; THUMB2-NEXT: it hi
+; THUMB2-NEXT: movhi r0, #1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r0, #-1
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_32_32:
+; V81M: @ %bb.0:
+; V81M-NEXT: cmp r0, r1
+; V81M-NEXT: cset r0, hi
+; V81M-NEXT: it lo
+; V81M-NEXT: movlo.w r0, #-1
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
}
define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_32_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_32_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB6_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB6_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB6_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB6_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_32_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_32_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: bx lr
%1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
ret i32 %1
}
define i64 @ucmp_64_64(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: ucmp_64_64:
-; CHECK: @ %bb.0:
-; CHECK-NEXT: subs.w r12, r0, r2
-; CHECK-NEXT: mov.w r9, #0
-; CHECK-NEXT: sbcs.w r12, r1, r3
-; CHECK-NEXT: mov.w r12, #0
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r12, #1
-; CHECK-NEXT: subs r0, r2, r0
-; CHECK-NEXT: sbcs.w r0, r3, r1
-; CHECK-NEXT: it lo
-; CHECK-NEXT: movlo.w r9, #1
-; CHECK-NEXT: sub.w r0, r9, r12
-; CHECK-NEXT: asrs r1, r0, #31
-; CHECK-NEXT: bx lr
+; THUMB1-LABEL: ucmp_64_64:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: .save {r4, r5, r6, lr}
+; THUMB1-NEXT: push {r4, r5, r6, lr}
+; THUMB1-NEXT: movs r4, #1
+; THUMB1-NEXT: movs r5, #0
+; THUMB1-NEXT: subs r6, r0, r2
+; THUMB1-NEXT: mov r6, r1
+; THUMB1-NEXT: sbcs r6, r3
+; THUMB1-NEXT: mov r6, r4
+; THUMB1-NEXT: blo .LBB7_2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: mov r6, r5
+; THUMB1-NEXT: .LBB7_2:
+; THUMB1-NEXT: subs r0, r2, r0
+; THUMB1-NEXT: sbcs r3, r1
+; THUMB1-NEXT: blo .LBB7_4
+; THUMB1-NEXT: @ %bb.3:
+; THUMB1-NEXT: mov r4, r5
+; THUMB1-NEXT: .LBB7_4:
+; THUMB1-NEXT: subs r0, r4, r6
+; THUMB1-NEXT: asrs r1, r0, #31
+; THUMB1-NEXT: pop {r4, r5, r6, pc}
+;
+; THUMB2-LABEL: ucmp_64_64:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: subs.w r12, r0, r2
+; THUMB2-NEXT: mov.w r9, #0
+; THUMB2-NEXT: sbcs.w r12, r1, r3
+; THUMB2-NEXT: mov.w r12, #0
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r12, #1
+; THUMB2-NEXT: subs r0, r2, r0
+; THUMB2-NEXT: sbcs.w r0, r3, r1
+; THUMB2-NEXT: it lo
+; THUMB2-NEXT: movlo.w r9, #1
+; THUMB2-NEXT: sub.w r0, r9, r12
+; THUMB2-NEXT: asrs r1, r0, #31
+; THUMB2-NEXT: bx lr
+;
+; V81M-LABEL: ucmp_64_64:
+; V81M: @ %bb.0:
+; V81M-NEXT: subs.w r12, r0, r2
+; V81M-NEXT: sbcs.w r12, r1, r3
+; V81M-NEXT: cset r12, lo
+; V81M-NEXT: subs r0, r2, r0
+; V81M-NEXT: sbcs.w r0, r3, r1
+; V81M-NEXT: cset r0, lo
+; V81M-NEXT: sub.w r0, r0, r12
+; V81M-NEXT: asrs r1, r0, #31
+; V81M-NEXT: bx lr
%1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
ret i64 %1
}
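; The unsigned tests follow the same -1/0/1 contract with unsigned
; predicates; for a 64-bit result the comparison value is widened by sign
; extension, which is what the trailing asrs in ucmp_64_64 implements.
; A sketch of the equivalent expansion:
define i64 @ucmp64_reference(i64 %x, i64 %y) {
  %lt = icmp ult i64 %x, %y
  %gt = icmp ugt i64 %x, %y
  %a = zext i1 %gt to i32
  %b = zext i1 %lt to i32
  %d = sub i32 %a, %b
  %r = sext i32 %d to i64            ; high word is the sign of the low word
  ret i64 %r
}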
diff --git a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
index a27650f..7a90d28 100644
--- a/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
+++ b/llvm/test/CodeGen/WebAssembly/expand-variadic-call.ll
@@ -37,52 +37,52 @@ define hidden void @copy(ptr noundef %va) {
; CHECK-NEXT: %va.addr = alloca ptr, align 4
; CHECK-NEXT: %cp = alloca ptr, align 4
; CHECK-NEXT: store ptr %va, ptr %va.addr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %cp)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr %cp, ptr %va.addr, i32 4, i1 false)
; CHECK-NEXT: %0 = load ptr, ptr %cp, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %cp)
; CHECK-NEXT: ret void
;
entry:
%va.addr = alloca ptr, align 4
%cp = alloca ptr, align 4
store ptr %va, ptr %va.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %cp)
call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr)
%0 = load ptr, ptr %cp, align 4
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %cp)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define hidden void @start_once(...) {
; CHECK-LABEL: define {{[^@]+}}@start_once(ptr %varargs) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s)
; CHECK-NEXT: store ptr %varargs, ptr %s, align 4
; CHECK-NEXT: %0 = load ptr, ptr %s, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s)
; CHECK-NEXT: ret void
;
entry:
%s = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
call void @llvm.va_start.p0(ptr nonnull %s)
%0 = load ptr, ptr %s, align 4
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
@@ -95,23 +95,23 @@ define hidden void @start_twice(...) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %s0 = alloca ptr, align 4
; CHECK-NEXT: %s1 = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %s1)
; CHECK-NEXT: store ptr %varargs, ptr %s0, align 4
; CHECK-NEXT: %0 = load ptr, ptr %s0, align 4
; CHECK-NEXT: call void @valist(ptr noundef %0)
; CHECK-NEXT: store ptr %varargs, ptr %s1, align 4
; CHECK-NEXT: %1 = load ptr, ptr %s1, align 4
; CHECK-NEXT: call void @valist(ptr noundef %1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull %s0)
; CHECK-NEXT: ret void
;
entry:
%s0 = alloca ptr, align 4
%s1 = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s1)
call void @llvm.va_start.p0(ptr nonnull %s0)
%0 = load ptr, ptr %s0, align 4
call void @valist(ptr noundef %0)
@@ -120,8 +120,8 @@ entry:
%1 = load ptr, ptr %s1, align 4
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s0)
ret void
}
@@ -129,11 +129,11 @@ define hidden void @single_i32(i32 noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -147,11 +147,11 @@ define hidden void @single_double(double noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_double(double noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_double.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_double.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr %0, align 8
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -163,11 +163,11 @@ define hidden void @single_v4f32(<4 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v4f32(<4 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v4f32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -179,11 +179,11 @@ define hidden void @single_v8f32(<8 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v8f32(<8 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v8f32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -195,11 +195,11 @@ define hidden void @single_v16f32(<16 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v16f32(<16 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v16f32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -211,11 +211,11 @@ define hidden void @single_v32f32(<32 x float> noundef %x) {
; CHECK-LABEL: define {{[^@]+}}@single_v32f32(<32 x float> noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %single_v32f32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %single_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -227,13 +227,13 @@ define hidden void @i32_double(i32 noundef %x, double noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_double(i32 noundef %x, double noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_double.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_double.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store double %y, ptr %1, align 8
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -245,13 +245,13 @@ define hidden void @double_i32(double noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@double_i32(double noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %double_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store double %x, ptr %0, align 8
; CHECK-NEXT: %1 = getelementptr inbounds nuw %double_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -265,13 +265,13 @@ define hidden void @i32_libcS(i32 noundef %x, ptr noundef byval(%struct.libcS) a
; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8
; CHECK-NEXT: %vararg_buffer = alloca %i32_libcS.vararg, align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %y, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_libcS.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -285,13 +285,13 @@ define hidden void @libcS_i32(ptr noundef byval(%struct.libcS) align 8 %x, i32 n
; CHECK-NEXT: %IndirectAlloca = alloca %struct.libcS, align 8
; CHECK-NEXT: %vararg_buffer = alloca %libcS_i32.vararg, align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %libcS_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -303,13 +303,13 @@ define hidden void @i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v4f32(i32 noundef %x, <4 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v4f32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v4f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <4 x float> %y, ptr %1, align 16
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -321,13 +321,13 @@ define hidden void @v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v4f32_i32(<4 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v4f32_i32.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <4 x float> %x, ptr %0, align 16
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v4f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -339,13 +339,13 @@ define hidden void @i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v8f32(i32 noundef %x, <8 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v8f32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v8f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <8 x float> %y, ptr %1, align 32
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -357,13 +357,13 @@ define hidden void @v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v8f32_i32(<8 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v8f32_i32.vararg, align 32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 36, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <8 x float> %x, ptr %0, align 32
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v8f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 36, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -375,13 +375,13 @@ define hidden void @i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v16f32(i32 noundef %x, <16 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v16f32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v16f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <16 x float> %y, ptr %1, align 64
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 128, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -393,13 +393,13 @@ define hidden void @v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v16f32_i32(<16 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v16f32_i32.vararg, align 64
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 68, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <16 x float> %x, ptr %0, align 64
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v16f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 68, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -411,13 +411,13 @@ define hidden void @i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@i32_v32f32(i32 noundef %x, <32 x float> noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %i32_v32f32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds nuw %i32_v32f32.vararg, ptr %vararg_buffer, i32 0, i32 2
; CHECK-NEXT: store <32 x float> %y, ptr %1, align 128
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -429,13 +429,13 @@ define hidden void @v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-LABEL: define {{[^@]+}}@v32f32_i32(<32 x float> noundef %x, i32 noundef %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %v32f32_i32.vararg, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 132, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store <32 x float> %x, ptr %0, align 128
; CHECK-NEXT: %1 = getelementptr inbounds nuw %v32f32_i32.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store i32 %y, ptr %1, align 4
; CHECK-NEXT: call void @vararg(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 132, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -448,11 +448,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %fptr_single_i32.vararg, align 16
; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_single_i32.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %1, align 4
; CHECK-NEXT: call void %0(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
@@ -468,11 +468,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) {
; CHECK-NEXT: %vararg_buffer = alloca %fptr_libcS.vararg, align 16
; CHECK-NEXT: %0 = load volatile ptr, ptr @vararg_ptr, align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr %IndirectAlloca, ptr %x, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %1 = getelementptr inbounds nuw %fptr_libcS.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store ptr %IndirectAlloca, ptr %1, align 4
; CHECK-NEXT: call void %0(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: ret void
;
entry:
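The change across all of these variadic-lowering hunks is the same mechanical rewrite: the explicit i64 size operand is dropped from llvm.lifetime.start/end, leaving pointer-only markers whose extent comes from the underlying alloca. A minimal sketch of the new form (hypothetical names; should pass opt -passes=verify):

    declare void @llvm.lifetime.start.p0(ptr nocapture)
    declare void @llvm.lifetime.end.p0(ptr nocapture)

    define void @sketch() {
    entry:
      %buf = alloca [4 x i32], align 4
      call void @llvm.lifetime.start.p0(ptr %buf)  ; extent = size of %buf
      store i32 0, ptr %buf, align 4
      call void @llvm.lifetime.end.p0(ptr %buf)    ; %buf is dead afterwards
      ret void
    }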
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
index 0f968de..3264fe9 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-sjlj-alloca.ll
@@ -18,7 +18,7 @@ define void @test_static() {
; CHECK-NEXT: i32 1, label %[[ENTRY_SPLIT_SPLIT:.*]]
; CHECK-NEXT: ]
; CHECK: [[ENTRY_SPLIT]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: call void @__wasm_setjmp(ptr @buf, i32 1, ptr [[FUNCTIONINVOCATIONID]])
; CHECK-NEXT: br label %[[ENTRY_SPLIT_SPLIT]]
; CHECK: [[ENTRY_SPLIT_SPLIT]]:
@@ -31,7 +31,7 @@ define void @test_static() {
; CHECK: [[_NOEXC:.*:]]
; CHECK-NEXT: ret void
; CHECK: [[ELSE]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: [[CATCH_DISPATCH_LONGJMP]]:
; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch.longjmp] unwind to caller
@@ -53,7 +53,7 @@ define void @test_static() {
;
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
%call = call i32 @setjmp(ptr @buf) returns_twice
%cmp = icmp eq i32 %call, 0
br i1 %cmp, label %if, label %else
@@ -63,7 +63,7 @@ if:
ret void
else:
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
@@ -114,7 +114,7 @@ define void @test_dynamic(i32 %size) {
;
entry:
%x = alloca i32, i32 %size, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
%call = call i32 @setjmp(ptr @buf) returns_twice
%cmp = icmp eq i32 %call, 0
br i1 %cmp, label %if, label %else
@@ -124,6 +124,6 @@ if:
ret void
else:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
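One consequence visible in test_dynamic above: with a runtime-sized alloca, the old markers could only pass i64 -1 ("unknown size"), a special case every consumer had to handle; the pointer-only form removes that wart because no size is stated at all. A reduced sketch (hypothetical names):

    define void @dyn(i32 %n) {
    entry:
      %p = alloca i32, i32 %n, align 4           ; size known only at run time
      call void @llvm.lifetime.start.p0(ptr %p)  ; no size operand to guess at
      call void @llvm.lifetime.end.p0(ptr %p)
      ret void
    }

    declare void @llvm.lifetime.start.p0(ptr nocapture)
    declare void @llvm.lifetime.end.p0(ptr nocapture)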
diff --git a/llvm/test/CodeGen/WebAssembly/returned.ll b/llvm/test/CodeGen/WebAssembly/returned.ll
index aef75d8..bad9d60 100644
--- a/llvm/test/CodeGen/WebAssembly/returned.ll
+++ b/llvm/test/CodeGen/WebAssembly/returned.ll
@@ -99,8 +99,8 @@ define void @test() {
; CHECK-NEXT: return
entry:
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%ret = call ptr @returns_arg(ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
index 2ca99bd..58dfd63 100644
--- a/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
+++ b/llvm/test/CodeGen/X86/pr140491-sincos-lifetimes.ll
@@ -51,20 +51,20 @@ entry:
%sincos = tail call { float, float } @llvm.sincos.f32(float %in)
%sin = extractvalue { float, float } %sincos, 0
%cos = extractvalue { float, float } %sincos, 1
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed)
store float %cos, ptr %computed, align 4
call void @use_ptr(ptr nonnull %computed)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed1)
%fneg_sin = fneg float %sin
store float %fneg_sin, ptr %computed1, align 4
call void @use_ptr(ptr nonnull %computed1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed1)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %computed3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %computed3)
%fneg_cos = fneg float %cos
store float %fneg_cos, ptr %computed3, align 4
call void @use_ptr(ptr nonnull %computed3)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %computed3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %computed3)
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/select-optimize.ll b/llvm/test/CodeGen/X86/select-optimize.ll
index c7cf9cb..6cb49f2 100644
--- a/llvm/test/CodeGen/X86/select-optimize.ll
+++ b/llvm/test/CodeGen/X86/select-optimize.ll
@@ -233,7 +233,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) {
; CHECK-LABEL: @expensive_val_operand5(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[A]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: [[CMP_FROZEN:%.*]] = freeze i1 [[CMP:%.*]]
; CHECK-NEXT: br i1 [[CMP_FROZEN]], label [[SELECT_TRUE_SINK:%.*]], label [[SELECT_END:%.*]], !prof [[PROF18]]
; CHECK: select.true.sink:
@@ -245,7 +245,7 @@ define i32 @expensive_val_operand5(i32 %b, i32 %y, i1 %cmp) {
;
%a = alloca i32
%load = load i32, ptr %a, align 8
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
%x = add i32 %load, %b
%sel = select i1 %cmp, i32 %x, i32 %y, !prof !17
ret i32 %sel
@@ -520,7 +520,7 @@ for.body: ; preds = %for.body.preheader,
declare void @llvm.dbg.value(metadata, metadata, metadata)
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @free(ptr nocapture)
diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
index 3685d8d..aad8940 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime-uar-uas.ll
@@ -8,9 +8,6 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
-
define i32 @basic_test(i64 %i) sanitize_address {
; CHECK-LABEL: define i32 @basic_test(
@@ -22,7 +19,7 @@ entry:
; CHECK-UAS: store i64 -868082052615769615, ptr %{{[0-9]+}}
; CHECK-UAS-SS-NOT: store i64
- call void @llvm.lifetime.start.p0(i64 2, ptr %c)
+ call void @llvm.lifetime.start.p0(ptr %c)
; Memory is unpoisoned at llvm.lifetime.start: 01
; CHECK-UAS: store i8 2, ptr %{{[0-9]+}}
@@ -30,7 +27,7 @@ entry:
store volatile i32 0, ptr %retval
store volatile i8 0, ptr %ci, align 1
- call void @llvm.lifetime.end.p0(i64 2, ptr %c)
+ call void @llvm.lifetime.end.p0(ptr %c)
; Memory is poisoned at llvm.lifetime.end: F8
; CHECK-UAS: store i8 -8, ptr %{{[0-9]+}}
; CHECK-UAS-SS-NOT: store i8 -8,
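The shadow bytes this test checks follow the standard ASan mapping that also appears in the instrumented output later in this diff: each shadow byte covers 8 application bytes, a small positive value is the count of addressable bytes after lifetime.start, and -8 (0xF8) is the stack-use-after-scope poison written at lifetime.end. A sketch of the address computation (hypothetical function name; 2147450880 is 0x7fff8000, the x86-64 shadow offset):

    define ptr @shadow_of(ptr %p) {
      %a  = ptrtoint ptr %p to i64
      %s  = lshr i64 %a, 3          ; 8 application bytes per shadow byte
      %o  = add i64 %s, 2147450880  ; + 0x7fff8000
      %sp = inttoptr i64 %o to ptr
      ret ptr %sp
    }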
diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
index 9594370..d1e0180 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
@@ -9,73 +9,10 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
-define void @lifetime_no_size(i64 %i) sanitize_address {
-; CHECK-LABEL: define void @lifetime_no_size(
-; CHECK-SAME: i64 [[I:%.*]]) #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
-; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 32
-; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
-; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP0]] to ptr
-; CHECK-NEXT: store i64 1102416563, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], 8
-; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
-; CHECK-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP5]], align 8
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], 16
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; CHECK-NEXT: store i64 ptrtoint (ptr @lifetime_no_size to i64), ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP0]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], 2147450880
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; CHECK-NEXT: store i64 -868083117767659023, ptr [[TMP11]], align 1
-; CHECK-NEXT: [[AI:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP2]], i64 0, i64 [[I]]
-; CHECK-NEXT: [[TMP12:%.*]] = ptrtoint ptr [[AI]] to i64
-; CHECK-NEXT: [[TMP13:%.*]] = lshr i64 [[TMP12]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP13]], 2147450880
-; CHECK-NEXT: [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
-; CHECK-NEXT: [[TMP16:%.*]] = load i8, ptr [[TMP15]], align 1
-; CHECK-NEXT: [[TMP17:%.*]] = icmp ne i8 [[TMP16]], 0
-; CHECK-NEXT: br i1 [[TMP17]], label %[[BB18:.*]], label %[[BB23:.*]], !prof [[PROF1:![0-9]+]]
-; CHECK: [[BB18]]:
-; CHECK-NEXT: [[TMP19:%.*]] = and i64 [[TMP12]], 7
-; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i8
-; CHECK-NEXT: [[TMP21:%.*]] = icmp sge i8 [[TMP20]], [[TMP16]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB23]]
-; CHECK: [[BB22]]:
-; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP12]]) #[[ATTR4:[0-9]+]]
-; CHECK-NEXT: unreachable
-; CHECK: [[BB23]]:
-; CHECK-NEXT: store volatile i8 0, ptr [[AI]], align 4
-; CHECK-NEXT: store i64 1172321806, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP9]], 0
-; CHECK-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; CHECK-NEXT: store i64 0, ptr [[TMP25]], align 1
-; CHECK-NEXT: ret void
-;
-entry:
- %a = alloca [2 x i32], align 4
-
- ; Poison memory in prologue: 0xf3f3f300f1f1f1f1
-
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
- ; Check that lifetimes with no size are ignored.
-
- %ai = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 %i
- store volatile i8 0, ptr %ai, align 4
-
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
- ; Check that lifetimes with no size are ignored.
-
- ; Unpoison stack frame on exit.
- ret void
-}
-
; Generic case of lifetime analysis.
define void @lifetime() sanitize_address {
; CHECK-DEFAULT-LABEL: define void @lifetime(
-; CHECK-DEFAULT-SAME: ) #[[ATTR1]] {
+; CHECK-DEFAULT-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-DEFAULT-NEXT: [[TMP1:%.*]] = alloca i64, align 32
; CHECK-DEFAULT-NEXT: store i64 0, ptr [[TMP1]], align 8
; CHECK-DEFAULT-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
@@ -86,7 +23,7 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: store i64 1102416563, ptr [[TMP5]], align 8
; CHECK-DEFAULT-NEXT: [[TMP6:%.*]] = add i64 [[TMP2]], 8
; CHECK-DEFAULT-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
-; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP7]], align 8
+; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP7]], align 8
; CHECK-DEFAULT-NEXT: [[TMP8:%.*]] = add i64 [[TMP2]], 16
; CHECK-DEFAULT-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-DEFAULT-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP9]], align 8
@@ -104,14 +41,14 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-DEFAULT-NEXT: [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
; CHECK-DEFAULT-NEXT: [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
-; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1]]
+; CHECK-DEFAULT-NEXT: br i1 [[TMP21]], label %[[BB22:.*]], label %[[BB27:.*]], !prof [[PROF1:![0-9]+]]
; CHECK-DEFAULT: [[BB22]]:
; CHECK-DEFAULT-NEXT: [[TMP23:%.*]] = and i64 [[TMP16]], 7
; CHECK-DEFAULT-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
; CHECK-DEFAULT-NEXT: [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
; CHECK-DEFAULT-NEXT: br i1 [[TMP25]], label %[[BB26:.*]], label %[[BB27]]
; CHECK-DEFAULT: [[BB26]]:
-; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4]]
+; CHECK-DEFAULT-NEXT: call void @__asan_report_store1(i64 [[TMP16]]) #[[ATTR4:[0-9]+]]
; CHECK-DEFAULT-NEXT: unreachable
; CHECK-DEFAULT: [[BB27]]:
; CHECK-DEFAULT-NEXT: store volatile i8 0, ptr [[TMP4]], align 1
@@ -182,7 +119,7 @@ define void @lifetime() sanitize_address {
; CHECK-DEFAULT-NEXT: ret void
;
; CHECK-NO-DYNAMIC-LABEL: define void @lifetime(
-; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR1]] {
+; CHECK-NO-DYNAMIC-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NO-DYNAMIC-NEXT: [[MYALLOCA:%.*]] = alloca i8, i64 64, align 32
; CHECK-NO-DYNAMIC-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[MYALLOCA]] to i64
; CHECK-NO-DYNAMIC-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], 32
@@ -191,7 +128,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: store i64 1102416563, ptr [[TMP4]], align 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP5:%.*]] = add i64 [[TMP1]], 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
-; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack.1 to i64), ptr [[TMP6]], align 8
+; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @___asan_gen_stack to i64), ptr [[TMP6]], align 8
; CHECK-NO-DYNAMIC-NEXT: [[TMP7:%.*]] = add i64 [[TMP1]], 16
; CHECK-NO-DYNAMIC-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i64 ptrtoint (ptr @lifetime to i64), ptr [[TMP8]], align 8
@@ -209,14 +146,14 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
; CHECK-NO-DYNAMIC-NEXT: [[TMP19:%.*]] = load i8, ptr [[TMP18]], align 1
; CHECK-NO-DYNAMIC-NEXT: [[TMP20:%.*]] = icmp ne i8 [[TMP19]], 0
-; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], label %[[BB26:.*]], !prof [[PROF1]]
+; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP20]], label %[[BB21:.*]], label %[[BB26:.*]], !prof [[PROF1:![0-9]+]]
; CHECK-NO-DYNAMIC: [[BB21]]:
; CHECK-NO-DYNAMIC-NEXT: [[TMP22:%.*]] = and i64 [[TMP15]], 7
; CHECK-NO-DYNAMIC-NEXT: [[TMP23:%.*]] = trunc i64 [[TMP22]] to i8
; CHECK-NO-DYNAMIC-NEXT: [[TMP24:%.*]] = icmp sge i8 [[TMP23]], [[TMP19]]
; CHECK-NO-DYNAMIC-NEXT: br i1 [[TMP24]], label %[[BB25:.*]], label %[[BB26]]
; CHECK-NO-DYNAMIC: [[BB25]]:
-; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4]]
+; CHECK-NO-DYNAMIC-NEXT: call void @__asan_report_store1(i64 [[TMP15]]) #[[ATTR4:[0-9]+]]
; CHECK-NO-DYNAMIC-NEXT: unreachable
; CHECK-NO-DYNAMIC: [[BB26]]:
; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[TMP3]], align 1
@@ -227,7 +164,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i8 -8, ptr [[TMP30]], align 1
; CHECK-NO-DYNAMIC-NEXT: [[ARR:%.*]] = alloca [10 x i32], align 16
-; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[ARR]])
+; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NO-DYNAMIC-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[ARR]] to i64
; CHECK-NO-DYNAMIC-NEXT: [[TMP32:%.*]] = lshr i64 [[TMP31]], 3
; CHECK-NO-DYNAMIC-NEXT: [[TMP33:%.*]] = add i64 [[TMP32]], 2147450880
@@ -245,7 +182,7 @@ define void @lifetime() sanitize_address {
; CHECK-NO-DYNAMIC-NEXT: unreachable
; CHECK-NO-DYNAMIC: [[BB42]]:
; CHECK-NO-DYNAMIC-NEXT: store volatile i8 0, ptr [[ARR]], align 1
-; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[ARR]])
+; CHECK-NO-DYNAMIC-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NO-DYNAMIC-NEXT: [[TMP43:%.*]] = add i64 [[TMP10]], 4
; CHECK-NO-DYNAMIC-NEXT: [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
; CHECK-NO-DYNAMIC-NEXT: store i8 4, ptr [[TMP44]], align 1
@@ -318,8 +255,8 @@ define void @zero_sized(i64 %a) #0 {
; CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[B:%.*]] = alloca [0 x i8], align 1
; CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 0, ptr [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 0, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
index 481e780..07b28f4 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/remove-memory-effects.ll
@@ -10,9 +10,9 @@ declare void @foo(ptr writeonly) memory(argmem: write)
define void @bar() sanitize_address {
entry:
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
call void @foo(ptr %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
index ac5d8b8..37b280c 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/exception-lifetime.ll
@@ -10,8 +10,8 @@ target triple = "riscv64-unknown-linux"
declare void @mayFail(ptr %x) sanitize_hwaddress
declare void @onExcept(ptr %x) sanitize_hwaddress
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
@@ -46,7 +46,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP19]] to ptr
; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP15]] to i8
; CHECK-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], 72057594037927935
@@ -65,7 +65,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP30]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: lpad:
; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 }
@@ -81,7 +81,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP38:%.*]] = lshr i64 [[TMP37]], 4
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP38]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP39]], i8 [[TMP35]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br label [[EH_RESUME:%.*]]
; CHECK: eh.resume:
; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
@@ -94,12 +94,12 @@ entry:
%x = alloca i32, align 8
%exn.slot = alloca ptr, align 8
%ehselector.slot = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
lpad: ; preds = %entry
@@ -111,7 +111,7 @@ lpad: ; preds = %entry
%2 = extractvalue { ptr, i32 } %0, 1
store i32 %2, ptr %ehselector.slot, align 4
call void @onExcept(ptr %x) #18
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br label %eh.resume
eh.resume: ; preds = %lpad
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
index db78c1f..d2949bf 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/RISCV/use-after-scope-setjmp.ll
@@ -78,13 +78,13 @@ sw.bb1: ; preds = %entry
br label %return
while.body: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10
store ptr %buf, ptr @stackbuf, align 8
; may_jump may call longjmp, going back to the switch (and then the return),
; bypassing the lifetime.end. This is why we need to untag on the return,
; rather than the lifetime.end.
call void @may_jump()
- call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10
br label %return
return: ; preds = %entry, %while.body, %sw.bb1
@@ -94,5 +94,5 @@ return: ; preds = %entry, %while.body,
declare i32 @setjmp(ptr noundef) returns_twice
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
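The comment in while.body is the crux of both setjmp tests: a longjmp out of may_jump resumes at the setjmp return, so control can leave the scope without ever reaching lifetime.end, and the untagging must therefore hang off the function return instead. A reduced sketch of the shape under test (hypothetical names):

    @buf = global [48 x i8] zeroinitializer     ; stand-in jmp_buf

    define void @sketch() {
    entry:
      %x = alloca i32, align 4
      call void @llvm.lifetime.start.p0(ptr %x)
      %c = call i32 @setjmp(ptr @buf) returns_twice
      %first = icmp eq i32 %c, 0
      br i1 %first, label %body, label %done

    body:
      call void @may_jump()                     ; may longjmp back to the setjmp
      call void @llvm.lifetime.end.p0(ptr %x)   ; skipped on the longjmp path
      br label %done

    done:
      ret void
    }

    declare i32 @setjmp(ptr noundef) returns_twice
    declare void @may_jump()
    declare void @llvm.lifetime.start.p0(ptr nocapture)
    declare void @llvm.lifetime.end.p0(ptr nocapture)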
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
index 292a565..ef86e63 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/alloca.ll
@@ -109,7 +109,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP5]], 57
; CHECK-NEXT: [[TMP9:%.*]] = or i64 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP9]] to ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i8
; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP10]], i64 16)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[BUF_SROA_0_HWASAN]] to i64
@@ -117,7 +117,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; CHECK-NEXT: store volatile i8 0, ptr [[BUF_SROA_0_HWASAN]], align 4
; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; CHECK-NEXT: call void @__hwasan_tag_memory(ptr [[BUF_SROA_0]], i8 [[TMP12]], i64 16)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]])
; CHECK-NEXT: ret i32 0
;
; INLINE-LABEL: define i32 @test_simple
@@ -150,7 +150,7 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; INLINE-NEXT: [[TMP19:%.*]] = shl i64 [[TMP16]], 57
; INLINE-NEXT: [[TMP20:%.*]] = or i64 [[TMP18]], [[TMP19]]
; INLINE-NEXT: [[BUF_SROA_0_HWASAN:%.*]] = inttoptr i64 [[TMP20]] to ptr
-; INLINE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; INLINE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUF_SROA_0]])
; INLINE-NEXT: [[TMP21:%.*]] = trunc i64 [[TMP16]] to i8
; INLINE-NEXT: [[TMP22:%.*]] = ptrtoint ptr [[BUF_SROA_0]] to i64
; INLINE-NEXT: [[TMP23:%.*]] = and i64 [[TMP22]], -9079256848778919937
@@ -197,19 +197,19 @@ define i32 @test_simple(ptr %a) sanitize_hwaddress {
; INLINE-NEXT: [[TMP54:%.*]] = lshr i64 [[TMP53]], 4
; INLINE-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP54]]
; INLINE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP55]], i8 [[TMP51]], i64 1, i1 false)
-; INLINE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[BUF_SROA_0]])
+; INLINE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUF_SROA_0]])
; INLINE-NEXT: ret i32 0
;
entry:
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
index 3e13eb4..f2ba94c 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/exception-lifetime.ll
@@ -10,8 +10,8 @@ target triple = "aarch64--linux-android"
declare void @mayFail(ptr %x) sanitize_hwaddress
declare void @onExcept(ptr %x) sanitize_hwaddress
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
@@ -48,7 +48,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[X_HWASAN:%.*]] = inttoptr i64 [[TMP21]] to ptr
; CHECK-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP17]] to i8
; CHECK-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP24:%.*]] = and i64 [[TMP23]], 72057594037927935
@@ -64,7 +64,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP29]], 4
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP30]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP31]], i8 [[TMP27]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: ret void
; CHECK: lpad:
; CHECK-NEXT: [[TMP32:%.*]] = landingpad { ptr, i32 }
@@ -82,7 +82,7 @@ define void @test() sanitize_hwaddress personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[TMP39:%.*]] = lshr i64 [[TMP38]], 4
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i8, ptr [[TMP16]], i64 [[TMP39]]
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP40]], i8 [[TMP36]], i64 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br label [[EH_RESUME:%.*]]
; CHECK: eh.resume:
; CHECK-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8
@@ -95,12 +95,12 @@ entry:
%x = alloca i32, align 8
%exn.slot = alloca ptr, align 8
%ehselector.slot = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
invoke void @mayFail(ptr %x) to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
lpad: ; preds = %entry
@@ -112,7 +112,7 @@ lpad: ; preds = %entry
%2 = extractvalue { ptr, i32 } %0, 1
store i32 %2, ptr %ehselector.slot, align 4
call void @onExcept(ptr %x) #18
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br label %eh.resume
eh.resume: ; preds = %lpad
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
index ae6fe57..a40d964 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-coloring.ll
@@ -16,22 +16,22 @@ define i32 @myCall_w2(i32 %in) sanitize_hwaddress {
entry:
%a = alloca [17 x ptr], align 8
%a2 = alloca [16 x ptr], align 8
- call void @llvm.lifetime.start.p0(i64 136, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%t1 = call i32 @foo(i32 %in, ptr %a)
%t2 = call i32 @foo(i32 %in, ptr %a)
- call void @llvm.lifetime.end.p0(i64 136, ptr %a)
- call void @llvm.lifetime.start.p0(i64 128, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a2)
%t3 = call i32 @foo(i32 %in, ptr %a2)
%t4 = call i32 @foo(i32 %in, ptr %a2)
- call void @llvm.lifetime.end.p0(i64 128, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a2)
%t5 = add i32 %t1, %t2
%t6 = add i32 %t3, %t4
%t7 = add i32 %t5, %t6
ret i32 %t7
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @foo(i32, ptr)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
index 60af551..a76566b 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/stack-safety-analysis.ll
@@ -24,9 +24,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_simple
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_simple
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -43,9 +43,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_cmpxchg
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_cmpxchg
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = cmpxchg ptr %buf.sroa.0, i8 1, i8 2 monotonic monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -62,9 +62,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_atomicrwm
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_atomicrwm
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = atomicrmw add ptr %buf.sroa.0, i8 1 monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -82,9 +82,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_use
%buf.sroa.0 = alloca i8, align 4
call void @use(ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -101,9 +101,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_in_range
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range
%buf.sroa.0 = alloca [10 x i8], align 4
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -121,9 +121,9 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_in_range2
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -194,9 +194,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -213,9 +213,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range2
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = cmpxchg ptr %ptr, i8 1, i8 2 monotonic monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -268,11 +268,11 @@ entry:
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
%buf.sroa.1 = alloca [10 x i8], align 4
%ptr1 = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 9
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.1)
call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %ptr1, i32 1, i1 true)
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.1)
ret i32 0
}
@@ -289,31 +289,9 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_out_of_range6
%buf.sroa.0 = alloca [10 x i8], align 4
%ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 10
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%0 = atomicrmw add ptr %ptr, i32 1 monotonic, align 4
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %buf.sroa.0)
- ret i32 0
-}
-
-; Check an alloca with potentially out of range GEP to ensure it gets a tag and
-; check.
-define i32 @test_potentially_out_of_range(ptr %a) sanitize_hwaddress {
-entry:
- ; CHECK-LABEL: @test_potentially_out_of_range
- ; NOSAFETY: call {{.*}}__hwasan_generate_tag
- ; NOSAFETY: call {{.*}}__hwasan_store
- ; SAFETY: call {{.*}}__hwasan_generate_tag
- ; SAFETY: call {{.*}}__hwasan_store
- ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag
- ; NOSTACK-NOT: call {{.*}}__hwasan_store
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_potentially_out_of_range
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_potentially_out_of_range
- %buf.sroa.0 = alloca [10 x i8], align 4
- %off = call i32 @getoffset()
- %ptr = getelementptr [10 x i8], ptr %buf.sroa.0, i32 0, i32 %off
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr)
- store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -333,26 +311,6 @@ entry:
call void @llvm.memmove.p0.p0.i32(ptr %ptr, ptr %a, i32 1, i1 true)
ret i32 0
}
-; Check an alloca with potentially out of range GEP to ensure it gets a tag and
-; check.
-define i32 @test_unclear(ptr %a) sanitize_hwaddress {
-entry:
- ; CHECK-LABEL: @test_unclear
- ; NOSAFETY: call {{.*}}__hwasan_generate_tag
- ; NOSAFETY: call {{.*}}__hwasan_store
- ; SAFETY: call {{.*}}__hwasan_generate_tag
- ; SAFETY: call {{.*}}__hwasan_store
- ; NOSTACK-NOT: call {{.*}}__hwasan_generate_tag
- ; NOSTACK: call {{.*}}__hwasan_store
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_unclear
- ; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_unclear
- %buf.sroa.0 = alloca i8, align 4
- %ptr = call ptr @getptr(ptr %buf.sroa.0)
- call void @llvm.lifetime.start.p0(i64 10, ptr nonnull %ptr)
- store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 10, ptr nonnull %ptr)
- ret i32 0
-}
define i32 @test_select(ptr %a) sanitize_hwaddress {
entry:
@@ -367,11 +325,11 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_select
%x = call ptr @getptr(ptr %a)
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%c = call i1 @cond()
%ptr = select i1 %c, ptr %x, ptr %buf.sroa.0
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -388,10 +346,10 @@ entry:
; SAFETY-REMARKS: --- !Missed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_retptr
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_retptr
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf.sroa.0)
%ptr = call ptr @retptr(ptr %buf.sroa.0)
store volatile i8 0, ptr %ptr, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %buf.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf.sroa.0)
ret i32 0
}
@@ -408,17 +366,17 @@ entry:
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: safeAlloca{{[[:space:]]}}Function: test_lifetime_poison
; SAFETY-REMARKS: --- !Passed{{[[:space:]]}}Pass: hwasan{{[[:space:]]}}Name: ignoreAccess{{[[:space:]]}}Function: test_lifetime_poison
%buf.sroa.0 = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 1, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
store volatile i8 0, ptr %buf.sroa.0, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 1, ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret i32 0
}
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
index 57d37ca..af6411a 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope-setjmp.ll
@@ -79,13 +79,13 @@ sw.bb1: ; preds = %entry
br label %return
while.body: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.start.p0(ptr nonnull %buf) #10
store ptr %buf, ptr @stackbuf, align 8
; may_jump may call longjmp, going back to the switch (and then the return),
; bypassing the lifetime.end. This is why we need to untag on the return,
; rather than the lifetime.end.
call void @may_jump()
- call void @llvm.lifetime.end.p0(i64 4096, ptr nonnull %buf) #10
+ call void @llvm.lifetime.end.p0(ptr nonnull %buf) #10
br label %return
return: ; preds = %entry, %while.body, %sw.bb1
@@ -95,5 +95,5 @@ return: ; preds = %entry, %while.body,
declare i32 @setjmp(ptr noundef) returns_twice
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
index e30b518..cfded02 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
@@ -26,13 +26,13 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
; X86-SCOPE-NEXT: br label [[TMP11:%.*]]
; X86-SCOPE: 11:
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) @cond()
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -96,7 +96,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SCOPE: 25:
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -110,7 +110,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -198,7 +198,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SHORT-SCOPE: 25:
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -215,7 +215,7 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -279,10 +279,10 @@ define dso_local i32 @standard_lifetime() local_unnamed_addr sanitize_hwaddress
2: ; preds = %2, %0
; We should tag the memory after the br (in the loop).
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%3 = tail call i1 (...) @cond() #2
; We should tag the memory before the next br (before the jump back).
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br i1 %3, label %4, label %2
4: ; preds = %2
@@ -307,13 +307,13 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
; X86-SCOPE-NEXT: br label [[TMP11:%.*]]
; X86-SCOPE: 11:
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
; X86-SCOPE-NEXT: [[TMP13:%.*]] = tail call i1 (...) @cond()
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br i1 [[TMP13]], label [[TMP15:%.*]], label [[TMP11]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -377,7 +377,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SCOPE: 25:
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -391,7 +391,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br i1 [[TMP31]], label [[TMP37:%.*]], label [[TMP25]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -479,7 +479,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP25:%.*]]
; AARCH64-SHORT-SCOPE: 25:
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP28:%.*]] = and i64 [[TMP27]], 72057594037927935
@@ -496,7 +496,7 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br i1 [[TMP33]], label [[TMP39:%.*]], label [[TMP25]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
@@ -560,10 +560,10 @@ define dso_local i32 @standard_lifetime_optnone() local_unnamed_addr optnone noi
2: ; preds = %2, %0
; We should tag the memory after the br (in the loop).
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%3 = tail call i1 (...) @cond() #2
; We should tag the memory before the next br (before the jump back).
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br i1 %3, label %4, label %2
4: ; preds = %2
@@ -809,12 +809,12 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress
%1 = alloca i8, align 1
; We erase lifetime markers if we insert instrumentation outside of the
; lifetime.
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret i32 0
}
@@ -833,7 +833,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) @cond()
@@ -906,7 +906,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1019,7 +1019,7 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1109,13 +1109,13 @@ define dso_local i32 @unreachable_exit() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0
;
%1 = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%2 = tail call i1 (...) @cond() #2
br i1 %2, label %3, label %4
3:
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret i32 0
4:
@@ -1137,7 +1137,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
-; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
; X86-SCOPE-NEXT: [[TMP12:%.*]] = tail call i1 (...) @cond()
@@ -1146,12 +1146,12 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br label [[TMP17:%.*]]
; X86-SCOPE: 15:
; X86-SCOPE-NEXT: [[TMP16:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP16]], i64 16)
-; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]])
; X86-SCOPE-NEXT: br label [[TMP17]]
; X86-SCOPE: 17:
; X86-SCOPE-NEXT: ret i32 0
@@ -1214,7 +1214,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1231,7 +1231,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = lshr i64 [[TMP34]], 4
; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP35]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP36]], i8 [[TMP32]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br label [[TMP43:%.*]]
; AARCH64-SCOPE: 37:
; AARCH64-SCOPE-NEXT: [[TMP38:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
@@ -1240,7 +1240,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SCOPE-NEXT: [[TMP41:%.*]] = lshr i64 [[TMP40]], 4
; AARCH64-SCOPE-NEXT: [[TMP42:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP41]]
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP42]], i8 [[TMP38]], i64 1, i1 false)
-; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SCOPE-NEXT: br label [[TMP43]]
; AARCH64-SCOPE: 43:
; AARCH64-SCOPE-NEXT: ret i32 0
@@ -1327,7 +1327,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
; AARCH64-SHORT-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
; AARCH64-SHORT-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
; AARCH64-SHORT-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
; AARCH64-SHORT-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
@@ -1347,7 +1347,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP37:%.*]] = lshr i64 [[TMP36]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP37]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP38]], i8 [[TMP34]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45:%.*]]
; AARCH64-SHORT-SCOPE: 39:
; AARCH64-SHORT-SCOPE-NEXT: [[TMP40:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
@@ -1356,7 +1356,7 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-SCOPE-NEXT: [[TMP43:%.*]] = lshr i64 [[TMP42]], 4
; AARCH64-SHORT-SCOPE-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP43]]
; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP44]], i8 [[TMP40]], i64 1, i1 false)
-; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[TMP18]])
+; AARCH64-SHORT-SCOPE-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP18]])
; AARCH64-SHORT-SCOPE-NEXT: br label [[TMP45]]
; AARCH64-SHORT-SCOPE: 45:
; AARCH64-SHORT-SCOPE-NEXT: ret i32 0
@@ -1417,17 +1417,17 @@ define dso_local i32 @diamond_lifetime() local_unnamed_addr sanitize_hwaddress {
; AARCH64-SHORT-NOSCOPE-NEXT: ret i32 0
;
%1 = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
%2 = tail call i1 (...) @cond() #2
br i1 %2, label %3, label %4
3:
call void @use(ptr nonnull %1) #2
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br label %5
4:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
br label %5
5:
@@ -1439,7 +1439,7 @@ declare dso_local i1 @cond(...) local_unnamed_addr
declare dso_local void @use(ptr) local_unnamed_addr
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
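Every hunk in this patch makes the same mechanical change: the explicit i64 size
operand of the lifetime intrinsics is dropped, leaving only the pointer, and the
covered size is instead derived from the underlying alloca. A minimal sketch of
the before/after shape (the function and value names here are illustrative, not
taken from any test in this patch):

    ; Updated declarations: no size operand.
    declare void @llvm.lifetime.start.p0(ptr nocapture)
    declare void @llvm.lifetime.end.p0(ptr nocapture)

    define void @sketch() {
      %p = alloca i8, align 1
      ; Old form was: call void @llvm.lifetime.start.p0(i64 1, ptr %p)
      call void @llvm.lifetime.start.p0(ptr %p)
      call void @llvm.lifetime.end.p0(ptr %p)
      ret void
    }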
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
index 2189424..b64dfbf 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg-kmsan.ll
@@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu"
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -45,7 +45,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
index 0bd0968..f3cceb7c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg.ll
@@ -7,10 +7,10 @@ target triple = "aarch64-unknown-linux-gnu"
define i32 @foo(i32 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -45,10 +45,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[STACK:%.*]] = getelementptr inbounds i8, ptr {{%.*}}, i32 192
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 16 {{%.*}}, ptr align 16 [[STACK]], i64 {{%.*}}, i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i32 2, double 3.000000e+00,
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
index 9133b32..06a34ac 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/vararg_shadow.ll
@@ -749,7 +749,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -809,26 +809,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
declare void @llvm.va_start(ptr) #4
declare void @llvm.va_end(ptr) #4
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -842,7 +842,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -902,16 +902,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -927,7 +927,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -987,16 +987,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1012,7 +1012,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1072,16 +1072,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1097,7 +1097,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1157,16 +1157,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(fp128 noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1182,7 +1182,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1242,16 +1242,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1267,7 +1267,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1327,16 +1327,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz([2 x i64] %t.coe
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1352,7 +1352,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1412,16 +1412,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz([2 x double] a
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1437,7 +1437,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1497,16 +1497,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz([4 x double] alignst
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1522,7 +1522,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1582,16 +1582,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz([2 x i64] %t.co
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1607,7 +1607,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1667,16 +1667,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz([2 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
@@ -1692,7 +1692,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca %"struct.std::__va_list", align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 193514046488576
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1752,16 +1752,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz([4 x fp128] ali
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca %"struct.std::__va_list", align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #5
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %args) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #5
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
index 52f4901..e05018c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
index 23df3fc..e6d3a4b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/LoongArch/vararg-loongarch64.ll
@@ -4,10 +4,10 @@ target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "loongarch64-unknown-linux-gnu"
;; First, check allocation of the save area.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: @foo
; CHECK: [[TMP1:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
@@ -17,10 +17,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP3]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP4]], i1 false)
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
index 64a76c5..69a74a3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64.ll
@@ -5,10 +5,10 @@ target triple = "mips64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
index 9f3127e..b19da8e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips/vararg-mips64el.ll
@@ -5,10 +5,10 @@ target triple = "mips64el--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
index 05a88f0..4d47b02 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
index 971b25f..98294e7 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
index 45e8b2d..9351067 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
index d6b956c..4151f3b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC/vararg-ppc64le.ll
@@ -5,10 +5,10 @@ target triple = "powerpc64le--linux"
define i32 @foo(i32 %guard, ...) {
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -23,10 +23,10 @@ define i32 @foo(i32 %guard, ...) {
; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
%1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
index 246db9d..29d1fbd 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
index 4a7b7b2..a4d2e16 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP4]], 2147483647
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i32 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i32(ptr align 8 [[TMP6]], i8 0, i32 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[VL]] to i32
; CHECK-NEXT: [[TMP8:%.*]] = and i32 [[TMP7]], 2147483647
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP8]] to ptr
@@ -50,15 +50,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP31:%.*]] = inttoptr i32 [[TMP30]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP17]], ptr align 4 [[TMP31]], i32 [[TMP21]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -67,10 +67,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
index 50e7be1..0c6e75c 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll
@@ -18,7 +18,7 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
@@ -32,15 +32,15 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VL]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 0
;
%vl = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i32 0
}
@@ -49,10 +49,10 @@ define i32 @foo(i32 %guard, ...) {
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
define i32 @bar() {
; CHECK-LABEL: define i32 @bar() {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
index e0b5907..c340d15 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg-kernel.ll
@@ -4,17 +4,17 @@ target datalayout = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"
target triple = "s390x-unknown-linux-gnu"
%struct.__va_list = type { i64, i64, ptr, ptr }
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define i64 @foo(i64 %guard, ...) #1 {
%vl = alloca %struct.__va_list
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
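Note that the i64 32 dropped throughout this file was simply the size of the
va_list type: %struct.__va_list = type { i64, i64, ptr, ptr } occupies
4 × 8 = 32 bytes. After the change that size is recovered from the allocation
itself rather than restated at each call site, e.g. (illustrative lines, not
CHECK patterns from the test):

    %vl = alloca %struct.__va_list   ; 32 bytes; the marker now covers the whole alloca
    call void @llvm.lifetime.start.p0(ptr %vl)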
diff --git a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
index 009aef9..91b21ea 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/SystemZ/vararg.ll
@@ -7,10 +7,10 @@ target triple = "s390x-unknown-linux-gnu"
define i64 @foo(i64 %guard, ...) {
%vl = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.start.p0(ptr %vl)
call void @llvm.va_start(ptr %vl)
call void @llvm.va_end(ptr %vl)
- call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ call void @llvm.lifetime.end.p0(ptr %vl)
ret i64 0
}
@@ -28,10 +28,10 @@ define i64 @foo(i64 %guard, ...) {
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 160, i1 false)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 {{%.*}}, ptr align 8 {{%.*}}, i64 [[A]], i1 false)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare i32 @random_i32()
declare i64 @random_i64()
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
index 7a3f0dd..b61cb6a 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_call.ll
@@ -29,7 +29,7 @@ entry:
define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #2
call void @llvm.va_start(ptr nonnull %args)
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body.lr.ph, label %for.end
@@ -85,13 +85,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
for.end: ; preds = %vaarg.end, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #2
ret i32 %sum.0.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @llvm.va_start(ptr) #2
@@ -100,7 +100,7 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
index 2051015..4bc14da 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/vararg_shadow.ll
@@ -551,7 +551,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -581,26 +581,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
declare void @llvm.va_start(ptr) #5
declare void @llvm.va_end(ptr) #5
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -644,16 +644,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -669,7 +669,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -699,16 +699,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -724,7 +724,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -754,16 +754,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -779,7 +779,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -809,16 +809,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -864,16 +864,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -889,7 +889,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -919,16 +919,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -944,7 +944,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -974,16 +974,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -999,7 +999,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1029,16 +1029,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1084,16 +1084,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1109,7 +1109,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1139,16 +1139,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1164,7 +1164,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1194,16 +1194,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
index 40ade5f..c05702b 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -125,12 +125,12 @@ entry:
br label %another_bb
another_bb:
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 7, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
store i32 8, ptr %x
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -158,8 +158,10 @@ another_bb:
define void @lifetime_start_var(i64 %cnt) sanitize_memory {
entry:
%x = alloca i32, i64 %cnt, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
+ call void @llvm.lifetime.start.p0(ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
ret void
}
@@ -176,5 +178,5 @@ entry:
; CHECK: call void @llvm.lifetime.end
; CHECK: ret void
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
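The variable-size case above follows the same pattern: i64 -1 previously signaled a lifetime of unknown extent, and the new form simply drops the operand, since the marker covers the whole alloca whatever its dynamic size. A small sketch assuming a runtime count %cnt (illustrative only):

    %x = alloca i32, i64 %cnt, align 4
    ; was: call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
    call void @llvm.lifetime.start.p0(ptr %x)
    store i32 7, ptr %x
    call void @llvm.lifetime.end.p0(ptr %x)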
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
index b27ef5d..2745939 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll
@@ -93,7 +93,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP6]], i8 0, i64 24, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -2147483649
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
@@ -165,7 +165,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]]
;
@@ -186,7 +186,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
-; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; ORIGIN-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
; ORIGIN-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
; ORIGIN-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -266,7 +266,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN: [[FOR_END]]:
; ORIGIN-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; ORIGIN-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT: ret i32 [[SUM_0_LCSSA]]
@@ -288,7 +288,7 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -4
; ORIGIN2-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN2-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[TMP7]], i8 0, i64 24, i1 false)
-; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN2-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; ORIGIN2-NEXT: [[TMP23:%.*]] = ptrtoint ptr [[ARGS]] to i64
; ORIGIN2-NEXT: [[TMP11:%.*]] = and i64 [[TMP23]], -2147483649
; ORIGIN2-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
@@ -368,14 +368,14 @@ define dso_local i32 @sum(i32 %n, ...) local_unnamed_addr #0 {
; ORIGIN2: [[FOR_END]]:
; ORIGIN2-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD]], %[[VAARG_END]] ]
; ORIGIN2-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; ORIGIN2-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_tls, align 8
; ORIGIN2-NEXT: store i32 0, ptr @__msan_retval_origin_tls, align 4
; ORIGIN2-NEXT: ret i32 [[SUM_0_LCSSA]]
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #2
call void @llvm.va_start(ptr nonnull %args)
%cmp9 = icmp sgt i32 %n, 0
br i1 %cmp9, label %for.body.lr.ph, label %for.end
@@ -419,13 +419,13 @@ vaarg.end: ; preds = %vaarg.in_mem, %vaar
for.end: ; preds = %vaarg.end, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %vaarg.end ]
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #2
ret i32 %sum.0.lcssa
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind
declare void @llvm.va_start(ptr) #2
@@ -434,7 +434,7 @@ declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare dso_local i80 @sum_i80(i32, ...) local_unnamed_addr
diff --git a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
index aedefca..74a6276 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll
@@ -562,7 +562,7 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -582,26 +582,26 @@ define linkonce_odr dso_local void @_Z5test2IcEvT_iz(i8 noundef signext %t, i32
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
declare void @llvm.va_start(ptr) #5
declare void @llvm.va_end(ptr) #5
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef %n, ...) sanitize_memory {
; CHECK-LABEL: define linkonce_odr dso_local void @_Z5test2IiEvT_iz(
@@ -614,7 +614,7 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -634,16 +634,16 @@ define linkonce_odr dso_local void @_Z5test2IiEvT_iz(i32 noundef %t, i32 noundef
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -658,7 +658,7 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -678,16 +678,16 @@ define linkonce_odr dso_local void @_Z5test2IfEvT_iz(float noundef %t, i32 nound
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -702,7 +702,7 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -722,16 +722,16 @@ define linkonce_odr dso_local void @_Z5test2IdEvT_iz(double noundef %t, i32 noun
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -746,7 +746,7 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -766,16 +766,16 @@ define linkonce_odr dso_local void @_Z5test2IeEvT_iz(x86_fp80 noundef %t, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -790,7 +790,7 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -810,16 +810,16 @@ define linkonce_odr dso_local void @_Z5test2I6IntIntEvT_iz(i64 %t.coerce, i32 no
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -834,7 +834,7 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -854,16 +854,16 @@ define linkonce_odr dso_local void @_Z5test2I10Int64Int64EvT_iz(i64 %t.coerce0,
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -878,7 +878,7 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -898,16 +898,16 @@ define linkonce_odr dso_local void @_Z5test2I12DoubleDoubleEvT_iz(double %t.coer
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -922,7 +922,7 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -942,16 +942,16 @@ define linkonce_odr dso_local void @_Z5test2I7Double4EvT_iz(ptr noundef byval(%s
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -966,7 +966,7 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -986,16 +986,16 @@ define linkonce_odr dso_local void @_Z5test2I11DoubleFloatEvT_iz(double %t.coerc
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1010,7 +1010,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1030,16 +1030,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble2EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
@@ -1054,7 +1054,7 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[ARGS:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[ARGS]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], -2147483649
; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
@@ -1074,16 +1074,16 @@ define linkonce_odr dso_local void @_Z5test2I11LongDouble4EvT_iz(ptr noundef byv
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @_Z3usePv(ptr noundef nonnull [[ARGS]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr nonnull [[ARGS]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr nonnull [[ARGS]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[ARGS]])
; CHECK-NEXT: ret void
;
entry:
%args = alloca [1 x %struct.__va_list_tag], align 16
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %args) #6
call void @llvm.va_start(ptr nonnull %args)
call void @_Z3usePv(ptr noundef nonnull %args)
call void @llvm.va_end(ptr nonnull %args)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %args) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %args) #6
ret void
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
index f07f3ad..04fdd23 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_debug_info.ll
@@ -664,8 +664,8 @@ entry:
declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
-declare void @llvm.lifetime.start.p0(i64 immarg %0, ptr nocapture %1)
-declare void @llvm.lifetime.end.p0(i64 immarg %0, ptr nocapture %1)
+declare void @llvm.lifetime.start.p0(ptr nocapture %0)
+declare void @llvm.lifetime.end.p0(ptr nocapture %0)
declare void @foo8(ptr nocapture)
@@ -674,7 +674,7 @@ define void @msan() sanitize_memory {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.donothing(), !dbg [[DBG1]]
; CHECK-NEXT: [[TEXT:%.*]] = alloca i8, align 1, !dbg [[DBG1]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TEXT]]), !dbg [[DBG7]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TEXT]]), !dbg [[DBG7]]
; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[TEXT]] to i64, !dbg [[DBG7]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080, !dbg [[DBG7]]
; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr, !dbg [[DBG7]]
@@ -685,13 +685,13 @@ define void @msan() sanitize_memory {
; CHECK-NEXT: call void @__msan_set_alloca_origin_with_descr(ptr [[TEXT]], i64 1, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG7]]
; CHECK-NEXT: store i64 0, ptr @__msan_param_tls, align 8, !dbg [[DBG8]]
; CHECK-NEXT: call void @foo8(ptr [[TEXT]]), !dbg [[DBG8]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TEXT]]), !dbg
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TEXT]]), !dbg
; CHECK-NEXT: ret void, !dbg
;
entry:
%text = alloca i8, align 1, !dbg !10
- call void @llvm.lifetime.start.p0(i64 1, ptr %text), !dbg !11
+ call void @llvm.lifetime.start.p0(ptr %text), !dbg !11
call void @foo8(ptr %text), !dbg !12
- call void @llvm.lifetime.end.p0(i64 1, ptr %text), !dbg !13
+ call void @llvm.lifetime.end.p0(ptr %text), !dbg !13
ret void, !dbg !14
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
index 2cc8fd6..5779367 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/msan_llvm_launder_invariant.ll
@@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local ptr @_Z1fv() local_unnamed_addr #0 {
entry:
%p = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %p)
+ call void @llvm.lifetime.start.p0(ptr nonnull %p)
%0 = load i8, ptr @flag, align 1
%tobool = icmp ne i8 %0, 0
%call = call zeroext i1 @_Z2f1PPvb(ptr nonnull %p, i1 zeroext %tobool)
%1 = load ptr, ptr %p, align 8
%2 = call ptr @llvm.launder.invariant.group.p0(ptr %1)
%retval.0 = select i1 %call, ptr %2, ptr null
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %p)
+ call void @llvm.lifetime.end.p0(ptr nonnull %p)
ret ptr %retval.0
}
@@ -29,8 +29,8 @@ declare dso_local zeroext i1 @_Z2f1PPvb(ptr, i1 zeroext) local_unnamed_addr
declare ptr @llvm.launder.invariant.group.p0(ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
attributes #0 = { sanitize_memory uwtable }
diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
index fc72631..deddecf 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
@@ -48,7 +48,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type {
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[SHADOW_BASE]]
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 10, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: call void @alloca_test_use(ptr [[X]])
; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[TMP11:%.*]] = and i64 [[TMP10]], [[APP_MEM_MASK]]
@@ -56,7 +56,7 @@ define void @alloca_lifetime_test(i1 %c) sanitize_type {
; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[SHADOW_BASE]]
; CHECK-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 10, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -66,9 +66,9 @@ entry:
br label %loop
loop:
- call void @llvm.lifetime.start.p0(i64 10, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
call void @alloca_test_use(ptr %x)
- call void @llvm.lifetime.end.p0(i64 10, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %c, label %loop, label %exit
exit:
@@ -99,7 +99,7 @@ define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type {
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP7]], 3
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 [[TMP13]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[X]])
; CHECK-NEXT: call void @alloca_test_use(ptr [[X]])
; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[N]], 4
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[X]] to i64
@@ -109,7 +109,7 @@ define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type {
; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
; CHECK-NEXT: [[TMP20:%.*]] = shl i64 [[TMP14]], 3
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP19]], i8 0, i64 [[TMP20]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[X]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -119,9 +119,9 @@ entry:
br label %loop
loop:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
call void @alloca_test_use(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %c, label %loop, label %exit
exit:
diff --git a/llvm/test/MC/RISCV/Relocations/align-after-relax.s b/llvm/test/MC/RISCV/Relocations/align-after-relax.s
new file mode 100644
index 0000000..95bef51
--- /dev/null
+++ b/llvm/test/MC/RISCV/Relocations/align-after-relax.s
@@ -0,0 +1,50 @@
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax %s --defsym LATE=1 -o %t1
+# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases %t1 | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+c,+relax %s -o %t0
+# RUN: llvm-objdump -dr --no-show-raw-insn -M no-aliases %t0 | FileCheck %s --check-prefix=CHECK0
+
+# CHECK: 4: 00 00 01 00 .word 0x00010000
+# CHECK-EMPTY:
+# CHECK: 8: 78 56 34 12 .word 0x12345678
+# CHECK-NEXT: c: 00 00 00 00 .word 0x00000000
+# CHECK: 10: auipc ra, 0x0
+# CHECK-NEXT: R_RISCV_CALL_PLT foo
+# CHECK-NEXT: R_RISCV_RELAX *ABS*
+# CHECK: 18: c.nop
+# CHECK-NEXT: R_RISCV_ALIGN *ABS*+0x6
+
+## Alignment directives in a lower-numbered subsection may be conservatively treated as linker-relaxable.
+# CHECK0: 4: 00 00 01 00 .word 0x00010000
+# CHECK0-NEXT: 000000006: R_RISCV_ALIGN *ABS*+0x6
+# CHECK0-NEXT: 8: 13 00 00 00 .word 0x00000013
+# CHECK0: 14: auipc ra, 0x0
+# CHECK0: 1c: c.nop
+# CHECK0-NEXT: R_RISCV_ALIGN *ABS*+0x6
+
+.text 2
+.option push
+.option norelax
+## R_RISCV_ALIGN is required even under norelax, because it follows a linker-relaxable instruction.
+.balign 8
+l2:
+ .word 0x12345678
+.option pop
+
+.text 1
+ .org .+1
+ .org .+3
+.ifdef LATE
+ .org .+0
+.endif
+ call foo
+
+.text 0
+_start:
+ .space 6
+.option push
+.option norelax
+.balign 8
+l0:
+ .word 0x12345678
+.option pop
diff --git a/llvm/test/MC/RISCV/Relocations/align-norvc.s b/llvm/test/MC/RISCV/Relocations/align-norvc.s
new file mode 100644
index 0000000..c3fe71e
--- /dev/null
+++ b/llvm/test/MC/RISCV/Relocations/align-norvc.s
@@ -0,0 +1,23 @@
+## To ensure ALIGN relocations in norvc code can adapt to shrinking of preceding rvc code, we
+## generate $alignment-2 bytes of NOPs regardless of rvc (e.g. 6 bytes for .balign 8, as checked below).
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax %s -o %t
+# RUN: llvm-objdump -dr -M no-aliases %t | FileCheck %s
+
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+relax -riscv-align-rvc=0 %s -o %t0
+# RUN: llvm-objdump -dr -M no-aliases %t0 | FileCheck %s --check-prefix=CHECK0
+
+# CHECK: 00000000: R_RISCV_RELAX *ABS*
+# CHECK-NEXT: 4: 0001 <unknown>
+# CHECK-NEXT: 00000004: R_RISCV_ALIGN *ABS*+0x6
+# CHECK-NEXT: 6: 00000013 addi zero, zero, 0x0
+# CHECK-NEXT: a: 00000537 lui a0, 0x0
+
+# CHECK0: 00000000: R_RISCV_RELAX *ABS*
+# CHECK0-NEXT: 4: 00000013 addi zero, zero, 0x0
+# CHECK0-NEXT: 00000004: R_RISCV_ALIGN *ABS*+0x4
+# CHECK0-NEXT: 8: 00000537 lui a0, 0x0
+
+ lui a0, %hi(foo)
+ .option norvc
+.balign 8
+ lui a0, %hi(foo)
diff --git a/llvm/test/MC/RISCV/Relocations/mc-dump.s b/llvm/test/MC/RISCV/Relocations/mc-dump.s
index ddc0c7d..99d34b5 100644
--- a/llvm/test/MC/RISCV/Relocations/mc-dump.s
+++ b/llvm/test/MC/RISCV/Relocations/mc-dump.s
@@ -9,12 +9,12 @@
# CHECK-NEXT:0 Data LinkerRelaxable Size:8 [97,00,00,00,e7,80,00,00]
# CHECK-NEXT: Fixup @0 Value:specifier(19,ext) Kind:4023
# CHECK-NEXT: Symbol @0 $x
-# CHECK-NEXT:8 Align LinkerRelaxable Size:0+4 []
+# CHECK-NEXT:8 Align LinkerRelaxable Size:0+6 []
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT: Fixup @0 Value:4 Kind:[[#]]
-# CHECK-NEXT:12 Align LinkerRelaxable Size:4+4 [13,05,30,00]
+# CHECK-NEXT: Fixup @0 Value:6 Kind:[[#]]
+# CHECK-NEXT:14 Align LinkerRelaxable Size:4+6 [13,05,30,00]
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT: Fixup @4 Value:4 Kind:[[#]]
+# CHECK-NEXT: Fixup @4 Value:6 Kind:[[#]]
# CHECK-NEXT:]
call ext
diff --git a/llvm/test/MC/RISCV/align-option-relax.s b/llvm/test/MC/RISCV/align-option-relax.s
index 890e1e7..60cd55f 100644
--- a/llvm/test/MC/RISCV/align-option-relax.s
+++ b/llvm/test/MC/RISCV/align-option-relax.s
@@ -1,8 +1,21 @@
# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=-relax < %s \
# RUN: | llvm-readobj -r - | FileCheck %s
-# Check that .option relax overrides -mno-relax and enables R_RISCV_ALIGN
-# relocations.
-# CHECK: R_RISCV_ALIGN
- .option relax
- .align 4
+## .option relax overrides -mno-relax and enables R_RISCV_ALIGN/R_RISCV_RELAX relocations.
+# CHECK: .rela.text
+# CHECK: R_RISCV_CALL_PLT
+# CHECK-NEXT: R_RISCV_RELAX
+# CHECK-NEXT: R_RISCV_ALIGN
+.option relax
+call foo
+.align 4
+
+## Alignments before the first linker-relaxable instruction do not need relocations.
+# CHECK-NOT: .rela.text1
+.section .text1,"ax"
+.align 4
+nop
+
+# CHECK: .rela.text2
+.section .text2,"ax"
+call foo
diff --git a/llvm/test/MC/RISCV/align.s b/llvm/test/MC/RISCV/align.s
index da3b1aa..4d4d998 100644
--- a/llvm/test/MC/RISCV/align.s
+++ b/llvm/test/MC/RISCV/align.s
@@ -46,20 +46,21 @@
# type for the .align N directive when linker relaxation is enabled.
# The linker can satisfy the alignment by removing NOPs after relaxation.
-# The first R_RISCV_ALIGN come from
-# MCELFStreamer::InitSections() emitCodeAlignment(getTextSectionAligntment()).
-# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2
-# C-OR-ZCA-EXT-RELAX-INST: c.nop
test:
+## Start with a linker-relaxable instruction so that the following alignment can be relaxable.
+ call foo
+# NORELAX-RELOC: R_RISCV_CALL_PLT
+# C-OR-ZCA-EXT-NORELAX-RELOC: R_RISCV_CALL_PLT
+
.p2align 2
# If the +c extension is enabled, the text section will be 2-byte aligned, so
# one c.nop instruction is sufficient.
-# C-OR-ZCA-EXT-RELAX-RELOC-NOT: R_RISCV_ALIGN - 0x2
-# C-OR-ZCA-EXT-RELAX-INST-NOT: c.nop
+# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2
+# C-OR-ZCA-EXT-RELAX-INST: c.nop
bne zero, a0, .LBB0_2
mv a0, zero
.p2align 3
-# RELAX-RELOC: R_RISCV_ALIGN - 0x4
+# RELAX-RELOC: R_RISCV_ALIGN - 0x6
# RELAX-INST: addi zero, zero, 0
# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6
# C-OR-ZCA-EXT-RELAX-INST: c.nop
@@ -68,7 +69,7 @@ test:
add a0, a0, a1
.align 4
.LBB0_2:
-# RELAX-RELOC: R_RISCV_ALIGN - 0xC
+# RELAX-RELOC: R_RISCV_ALIGN - 0xE
# RELAX-INST: addi zero, zero, 0
# RELAX-INST: addi zero, zero, 0
# RELAX-INST: addi zero, zero, 0
@@ -84,7 +85,7 @@ test:
.p2align 3
.constant_pool:
.long 3126770193
-# RELAX-RELOC: R_RISCV_ALIGN - 0x4
+# RELAX-RELOC: R_RISCV_ALIGN - 0x6
# RELAX-INST: addi zero, zero, 0
# NORELAX-INST: addi zero, zero, 0
# C-OR-ZCA-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6
@@ -136,16 +137,8 @@ data2:
add a0, a0, a1
## Branches crossing the linker-relaxable R_RISCV_ALIGN need relocations.
-# RELAX-RELOC: .rela.text3 {
-# RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: }
-# C-OR-ZCA-EXT-RELAX-RELOC: .rela.text3 {
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# C-OR-ZCA-EXT-RELAX-RELOC-NEXT: }
+# RELAX-RELOC-NOT: .rela.text3 {
+# C-OR-ZCA-EXT-RELAX-RELOC-NOT: .rela.text3 {
.section .text3, "ax"
bnez t1, 1f
bnez t2, 2f
@@ -157,14 +150,15 @@ data2:
## .text3 with a call at the start
# NORELAX-RELOC: .rela.text3a
-# C-OR-ZCA-EXT-NORELAX-RELOC: .rela.text3a
# RELAX-RELOC: .rela.text3a {
# RELAX-RELOC-NEXT: 0x0 R_RISCV_CALL_PLT foo 0x0
# RELAX-RELOC-NEXT: 0x0 R_RISCV_RELAX - 0x0
# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x10 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_BRANCH .Ltmp[[#]] 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_ALIGN - 0x6
+# RELAX-RELOC-NEXT: 0x16 R_RISCV_BRANCH .Ltmp[[#]] 0x0
# RELAX-RELOC-NEXT: }
+# C-OR-ZCA-EXT-NORELAX-RELOC: .rela.text3a
+# C-OR-ZCA-EXT-RELAX-RELOC: .rela.text3a
.section .text3a, "ax"
call foo
bnez t1, 1f
@@ -177,11 +171,8 @@ bnez t1, 2b
## .text3 with a call at the end
# RELAX-RELOC: .rela.text3b {
-# RELAX-RELOC-NEXT: 0x4 R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x8 R_RISCV_ALIGN - 0x4
-# RELAX-RELOC-NEXT: 0xC R_RISCV_BRANCH .Ltmp[[#]] 0x0
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_CALL_PLT foo 0x0
-# RELAX-RELOC-NEXT: 0x14 R_RISCV_RELAX - 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_CALL_PLT foo 0x0
+# RELAX-RELOC-NEXT: 0x10 R_RISCV_RELAX - 0x0
# RELAX-RELOC-NEXT: }
.section .text3b, "ax"
bnez t1, 1f
diff --git a/llvm/test/MC/RISCV/cfi-advance.s b/llvm/test/MC/RISCV/cfi-advance.s
index f3f8530..7643e01 100644
--- a/llvm/test/MC/RISCV/cfi-advance.s
+++ b/llvm/test/MC/RISCV/cfi-advance.s
@@ -7,7 +7,7 @@
# NORELAX: Relocation section '.rela.text1' at offset {{.*}} contains 1 entries:
# NORELAX-NEXT: Offset Info Type Sym. Value Symbol's Name + Addend
-# NORELAX-NEXT: 00000000 00000313 R_RISCV_CALL_PLT 00000004 .L0 + 0
+# NORELAX-NEXT: 00000000 00000313 R_RISCV_CALL_PLT 00000008 .L0 + 0
# NORELAX-EMPTY:
# RELAX: Relocation section '.rela.text1' at offset {{.*}} contains 2 entries:
# RELAX: R_RISCV_CALL_PLT
@@ -16,23 +16,25 @@
# NORELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 1 entries:
# NORELAX: Offset Info Type Sym. Value Symbol's Name + Addend
# NORELAX-NEXT: 0000001c 00000139 R_RISCV_32_PCREL 00000000 .L0 + 0
-# RELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 5 entries:
+# RELAX-NEXT: Relocation section '.rela.eh_frame' at offset {{.*}} contains 7 entries:
# RELAX: Offset Info Type Sym. Value Symbol's Name + Addend
# RELAX-NEXT: 0000001c 00000139 R_RISCV_32_PCREL 00000000 .L0 + 0
-# RELAX-NEXT: 00000020 00000c23 R_RISCV_ADD32 0001017a .L0 + 0
+# RELAX-NEXT: 00000020 00000d23 R_RISCV_ADD32 0001017c .L0 + 0
# RELAX-NEXT: 00000020 00000127 R_RISCV_SUB32 00000000 .L0 + 0
-# RELAX-NEXT: 00000035 00000b35 R_RISCV_SET6 00010176 .L0 + 0
-# RELAX-NEXT: 00000035 00000934 R_RISCV_SUB6 0001016e .L0 + 0
+# RELAX-NEXT: 00000026 00000536 R_RISCV_SET8 00000068 .L0 + 0
+# RELAX-NEXT: 00000026 00000125 R_RISCV_SUB8 00000000 .L0 + 0
+# RELAX-NEXT: 00000035 00000c35 R_RISCV_SET6 00010178 .L0 + 0
+# RELAX-NEXT: 00000035 00000a34 R_RISCV_SUB6 0001016e .L0 + 0
# CHECK-EMPTY:
-# NORELAX: Symbol table '.symtab' contains 13 entries:
-# RELAX: Symbol table '.symtab' contains 16 entries:
+# NORELAX: Symbol table '.symtab' contains 14 entries:
+# RELAX: Symbol table '.symtab' contains 18 entries:
# RELAX-NEXT: Num: Value Size Type Bind Vis Ndx Name
# RELAX-NEXT: 0: 00000000 0 NOTYPE LOCAL DEFAULT UND
# RELAX-NEXT: 1: 00000000 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 3: 00000004 0 NOTYPE LOCAL DEFAULT 2 .L0{{$}}
-# RELAX: 9: 0001016e 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 11: 00010176 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
-# RELAX: 12: 0001017a 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 3: 00000008 0 NOTYPE LOCAL DEFAULT 2 .L0{{$}}
+# RELAX: 10: 0001016e 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 12: 00010178 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
+# RELAX: 13: 0001017c 0 NOTYPE LOCAL DEFAULT 2 .L0 {{$}}
# CHECK-DWARFDUMP: DW_CFA_advance_loc1: 104
# CHECK-DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +8
@@ -48,11 +50,11 @@
.type test,@function
test:
.cfi_startproc
- nop
+ call foo
## This looks similar to fake label names ".L0 ". Even if this is ".L0 ",
## the assembler will not conflate it with fake labels.
.L0:
- .zero 100, 0x90
+ .zero 96, 0x90
.cfi_def_cfa_offset 8
nop
.zero 255, 0x90
diff --git a/llvm/test/MC/RISCV/nop-slide.s b/llvm/test/MC/RISCV/nop-slide.s
index a49ffdc..9e7401d 100644
--- a/llvm/test/MC/RISCV/nop-slide.s
+++ b/llvm/test/MC/RISCV/nop-slide.s
@@ -1,5 +1,5 @@
-# RUN: llvm-mc -triple riscv64 -mattr +c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s -check-prefix CHECK-RVC-NORELAX
-# RUN: llvm-mc -triple riscv64 -mattr +c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s -check-prefix CHECK-RVC-RELAX
+# RUN: llvm-mc -triple riscv64 -mattr +c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
+# RUN: llvm-mc -triple riscv64 -mattr +c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -triple riscv64 -mattr -c,-relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
# RUN: llvm-mc -triple riscv64 -mattr -c,+relax -filetype obj -o - %s | llvm-objdump -d - | FileCheck %s
@@ -9,16 +9,6 @@
.balign 4
auipc a0, 0
-# CHECK-RVC-NORELAX: 0000000000000000 <.text>:
-# CHECK-RVC-NORELAX-NEXT: 0: 00 00 01 00 .word 0x00010000
-# CHECK-RVC-NORELAX-NEXT: 4: 00000517 auipc a0, 0x0
-
-# CHECK-RVC-RELAX: 0000000000000000 <.text>:
-# CHECK-RVC-RELAX-NEXT: 0: 0001 nop
-# CHECK-RVC-RELAX-NEXT: 2: 00 01 .short 0x0100
-# CHECK-RVC-RELAX-NEXT: 4: 00 .byte 0x00
-# CHECK-RVC-RELAX-NEXT: 5: 00000517 auipc a0, 0x0
-
# CHECK: 0000000000000000 <.text>:
-# CHECK-NEXT: 0: 00 00 00 00 .word 0x00000000
+# CHECK-NEXT: 0: 00 00 01 00 .word 0x00010000
# CHECK-NEXT: 4: 00000517 auipc a0, 0x0
diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll
index d093c65..93d3aa4 100644
--- a/llvm/test/Transforms/AddDiscriminators/call.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call.ll
@@ -12,8 +12,8 @@ define void @_Z3foov() #0 !dbg !4 {
call void @_Z3barv(), !dbg !10
; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%a = alloca [100 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 100, ptr %a), !dbg !11
- call void @llvm.lifetime.end.p0(i64 100, ptr %a), !dbg !11
+ call void @llvm.lifetime.start.p0(ptr %a), !dbg !11
+ call void @llvm.lifetime.end.p0(ptr %a), !dbg !11
call void @_Z3barv(), !dbg !11
; CHECK: call void @_Z3barv(), !dbg ![[CALL1:[0-9]+]]
call void @_Z3barv(), !dbg !12
@@ -22,8 +22,8 @@ define void @_Z3foov() #0 !dbg !4 {
}
declare void @_Z3barv() #1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
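Every lifetime hunk in the rest of this patch follows the same shape as the one above: the explicit i64 size operand is dropped, and the extent of the lifetime is instead taken from the alloca that the pointer operand must now refer to directly. A minimal sketch of the new form (hypothetical function @lifetime_sketch, not taken from any test):

define void @lifetime_sketch() {
  %buf = alloca [100 x i8], align 16
  ; No size operand: the 100-byte extent comes from the alloca itself.
  call void @llvm.lifetime.start.p0(ptr %buf)
  call void @llvm.lifetime.end.p0(ptr %buf)
  ret void
}
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

Since the intrinsics may only be applied to a static alloca, the size operand duplicated information already present in the allocation, which is presumably why it was dropped.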
diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index cc51a00d..9bf8a51 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -31,11 +31,11 @@ define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
; PWR7-LABEL: @test_cmpxchg_seq_cst(
; PWR7-NEXT: entry:
; PWR7-NEXT: [[TMP0:%.*]] = alloca i128, align 16
-; PWR7-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP0]])
; PWR7-NEXT: store i128 [[DESIRE:%.*]], ptr [[TMP0]], align 16
; PWR7-NEXT: [[TMP1:%.*]] = call zeroext i1 @__atomic_compare_exchange_16(ptr [[ADDR:%.*]], ptr [[TMP0]], i128 [[NEW:%.*]], i32 5, i32 5)
; PWR7-NEXT: [[TMP2:%.*]] = load i128, ptr [[TMP0]], align 16
-; PWR7-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP0]])
; PWR7-NEXT: [[TMP3:%.*]] = insertvalue { i128, i1 } poison, i128 [[TMP2]], 0
; PWR7-NEXT: [[TMP4:%.*]] = insertvalue { i128, i1 } [[TMP3]], i1 [[TMP1]], 1
; PWR7-NEXT: [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP4]], 1
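The PWR7 lines above check AtomicExpand's libcall path for a quadword compare-and-swap, with the expected-value temporary now bracketed by the size-less lifetime markers. As a rough sketch of the source IR being expanded here (hypothetical names, mirroring @test_cmpxchg_seq_cst):

define i1 @cmpxchg_sketch(ptr %p, i128 %want, i128 %new) {
  ; Expanded by AtomicExpand into a lifetime-bracketed stack slot holding
  ; %want plus a call to @__atomic_compare_exchange_16.
  %pair = cmpxchg ptr %p, i128 %want, i128 %new seq_cst seq_cst
  %ok = extractvalue { i128, i1 } %pair, 1
  ret i1 %ok
}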
diff --git a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
index 2cbb179..60fb248 100644
--- a/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll
@@ -9,12 +9,12 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1
@@ -35,12 +35,12 @@ define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store float [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[PTR]], ptr [[TMP1]], i32 [[TMP3]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { float, i1 } poison, float [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { float, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP7]], 1
diff --git a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
index 682c1e6..1d6a32c 100644
--- a/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
+++ b/llvm/test/Transforms/AtomicExpand/SPARC/libcalls.ll
@@ -38,11 +38,11 @@ define i16 @test_exchange_i16(ptr %arg, i16 %val) {
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: %1 = alloca i16, align 2
-; CHECK: call void @llvm.lifetime.start.p0(i64 2, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i16 %old, ptr %1, align 2
; CHECK: %2 = call zeroext i1 @__atomic_compare_exchange_2(ptr %arg, ptr %1, i16 %new, i32 5, i32 0)
; CHECK: %3 = load i16, ptr %1, align 2
-; CHECK: call void @llvm.lifetime.end.p0(i64 2, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %4 = insertvalue { i16, i1 } poison, i16 %3, 0
; CHECK: %5 = insertvalue { i16, i1 } %4, i1 %2, 1
; CHECK: %ret = extractvalue { i16, i1 } %5, 0
@@ -68,10 +68,10 @@ define i16 @test_add_i16(ptr %arg, i16 %val) {
; CHECK-LABEL: @test_load_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: call void @__atomic_load(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK: %2 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret i128 %2
define i128 @test_load_i128(ptr %arg) {
%ret = load atomic i128, ptr %arg seq_cst, align 16
@@ -80,10 +80,10 @@ define i128 @test_load_i128(ptr %arg) {
; CHECK-LABEL: @test_store_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret void
define void @test_store_i128(ptr %arg, i128 %val) {
store atomic i128 %val, ptr %arg seq_cst, align 16
@@ -92,14 +92,14 @@ define void @test_store_i128(ptr %arg, i128 %val) {
; CHECK-LABEL: @test_exchange_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: call void @__atomic_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %3 = load i128, ptr %2, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: ret i128 %3
define i128 @test_exchange_i128(ptr %arg, i128 %val) {
%ret = atomicrmw xchg ptr %arg, i128 %val seq_cst
@@ -108,15 +108,15 @@ define i128 @test_exchange_i128(ptr %arg, i128 %val) {
; CHECK-LABEL: @test_cmpxchg_i128(
; CHECK: %1 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %old, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 0)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: %4 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %5 = insertvalue { i128, i1 } poison, i128 %4, 0
; CHECK: %6 = insertvalue { i128, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { i128, i1 } %6, 0
@@ -139,14 +139,14 @@ define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) {
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i128 %loaded, %val
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store i128 %loaded, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %2)
; CHECK: %5 = load i128, ptr %1, align 8
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %6 = insertvalue { i128, i1 } poison, i128 %5, 0
; CHECK: %7 = insertvalue { i128, i1 } %6, i1 %4, 1
; CHECK: %success = extractvalue { i128, i1 } %7, 1
@@ -181,12 +181,12 @@ define void @test_store_double(ptr %arg, double %val) {
; CHECK-LABEL: @test_cmpxchg_ptr(
; CHECK: %1 = alloca ptr, align 4
-; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store ptr %old, ptr %1, align 4
; CHECK: %2 = ptrtoint ptr %new to i32
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange_4(ptr %arg, ptr %1, i32 %2, i32 5, i32 2)
; CHECK: %4 = load ptr, ptr %1, align 4
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: %5 = insertvalue { ptr, i1 } poison, ptr %4, 0
; CHECK: %6 = insertvalue { ptr, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { ptr, i1 } %6, 0
@@ -202,10 +202,10 @@ define ptr @test_cmpxchg_ptr(ptr %arg, ptr %old, ptr %new) {
; CHECK-LABEL: @test_store_fp128
; CHECK: %1 = alloca fp128, align 8
-; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %1)
; CHECK: store fp128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
-; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
+; CHECK: call void @llvm.lifetime.end.p0(ptr %1)
; CHECK: ret void
define void @test_store_fp128(ptr %arg, fp128 %val) {
store atomic fp128 %val, ptr %arg seq_cst, align 16
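The SPARC checks above exercise the generic, size-parameterized libcalls (__atomic_load, __atomic_store, __atomic_exchange, __atomic_compare_exchange), each using one or two stack temporaries bracketed by the size-less lifetime markers. A sketch of the input the exchange case corresponds to (hypothetical names, mirroring @test_exchange_i128):

define i128 @xchg_sketch(ptr %p, i128 %v) {
  ; AtomicExpand stores %v into a first temporary, calls
  ; @__atomic_exchange(i32 16, ...), and reloads the old value from a
  ; second temporary; each temporary's lifetime ends as soon as the
  ; libcall no longer needs it.
  %old = atomicrmw xchg ptr %p, i128 %v seq_cst
  ret i128 %old
}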
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 20a9e9f..fda296b 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -5,10 +5,10 @@
define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
; CHECK-LABEL: @atomic_load256_libcall(
; CHECK-NEXT: [[TMP1:%.*]] = alloca i256, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[PTR:%.*]], ptr [[TMP1]], i32 0)
; CHECK-NEXT: [[TMP2:%.*]] = load i256, ptr [[TMP1]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: ret i256 [[TMP2]]
;
%result = load atomic i256, ptr %ptr unordered, align 16
@@ -19,10 +19,10 @@ define i256 @atomic_load256_libcall_as1(ptr addrspace(1) %ptr) nounwind {
; CHECK-LABEL: @atomic_load256_libcall_as1(
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
; CHECK-NEXT: [[TMP2:%.*]] = alloca i256, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP2]])
; CHECK-NEXT: call void @__atomic_load(i32 32, ptr [[TMP1]], ptr [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP3:%.*]] = load i256, ptr [[TMP2]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP2]])
; CHECK-NEXT: ret i256 [[TMP3]]
;
%result = load atomic i256, ptr addrspace(1) %ptr unordered, align 16
diff --git a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
index 59916cc..647f187 100644
--- a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
+++ b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
@@ -361,11 +361,11 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -388,11 +388,11 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -415,11 +415,11 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -442,11 +442,11 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -469,11 +469,11 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -496,11 +496,11 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -523,11 +523,11 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -550,11 +550,11 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -577,11 +577,11 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -604,11 +604,11 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -631,11 +631,11 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -658,11 +658,11 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -685,11 +685,11 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -712,11 +712,11 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -739,11 +739,11 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -766,11 +766,11 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -793,11 +793,11 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -820,11 +820,11 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -847,11 +847,11 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -874,11 +874,11 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
@@ -1251,11 +1251,11 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1278,11 +1278,11 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1305,11 +1305,11 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1332,11 +1332,11 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1359,11 +1359,11 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1386,11 +1386,11 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1413,11 +1413,11 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1440,11 +1440,11 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1467,11 +1467,11 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1494,11 +1494,11 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1521,11 +1521,11 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1548,11 +1548,11 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1575,11 +1575,11 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1602,11 +1602,11 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1629,11 +1629,11 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1656,11 +1656,11 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1683,11 +1683,11 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1710,11 +1710,11 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1737,11 +1737,11 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -1764,11 +1764,11 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
@@ -2112,11 +2112,11 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2139,11 +2139,11 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2166,11 +2166,11 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2193,11 +2193,11 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2220,11 +2220,11 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2247,11 +2247,11 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2274,11 +2274,11 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2301,11 +2301,11 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2328,11 +2328,11 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2355,11 +2355,11 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2382,11 +2382,11 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2409,11 +2409,11 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2436,11 +2436,11 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2463,11 +2463,11 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2490,11 +2490,11 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2517,11 +2517,11 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2544,11 +2544,11 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2571,11 +2571,11 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2598,11 +2598,11 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
@@ -2625,11 +2625,11 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
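All of the hunks above make the same mechanical change: the explicit i64 size operand is dropped from llvm.lifetime.start/end, and the marker's extent is instead taken from the alloca operand. A minimal before/after sketch (illustrative only; the i32 alloca and the byte size of 4 are assumptions, not values taken from these tests):

  %tmp = alloca i32, align 4

  ; old form: byte size passed explicitly as the first operand
  call void @llvm.lifetime.start.p0(i64 4, ptr %tmp)
  store i32 0, ptr %tmp
  call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)

  ; new form: no size operand; the marker covers the entire alloca
  call void @llvm.lifetime.start.p0(ptr %tmp)
  store i32 0, ptr %tmp
  call void @llvm.lifetime.end.p0(ptr %tmp)

Note that the immarg attribute visible in the old declarations existed only to constrain the size operand to a constant; with the operand gone, it disappears from the declarations as well.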
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack.ll b/llvm/test/Transforms/Attributor/heap_to_stack.ll
index 6719290..d54f713 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack.ll
@@ -27,7 +27,7 @@ declare i32 @no_return_call() noreturn
declare void @free(ptr nocapture) allockind("free")
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
index 0be9434..9a6e0680 100644
--- a/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
+++ b/llvm/test/Transforms/Attributor/heap_to_stack_gpu.ll
@@ -32,7 +32,7 @@ declare i32 @no_return_call() noreturn
declare void @free(ptr nocapture)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
;.
; CHECK: @G = internal global ptr undef, align 4
diff --git a/llvm/test/Transforms/Attributor/liveness.ll b/llvm/test/Transforms/Attributor/liveness.ll
index 874eff6..c112d99 100644
--- a/llvm/test/Transforms/Attributor/liveness.ll
+++ b/llvm/test/Transforms/Attributor/liveness.ll
@@ -2589,7 +2589,7 @@ define void @bad_gep() {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; TUNIT-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18:[0-9]+]]
; TUNIT-NEXT: br label [[EXIT:%.*]]
; TUNIT: while.body:
; TUNIT-NEXT: unreachable
@@ -2598,7 +2598,7 @@ define void @bad_gep() {
; TUNIT: if.end:
; TUNIT-NEXT: unreachable
; TUNIT: exit:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR18]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
@@ -2607,7 +2607,7 @@ define void @bad_gep() {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[N1:%.*]] = alloca i8, i32 0, align 1
; CGSCC-NEXT: [[M2:%.*]] = alloca i8, i32 0, align 1
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21:[0-9]+]]
; CGSCC-NEXT: br label [[EXIT:%.*]]
; CGSCC: while.body:
; CGSCC-NEXT: unreachable
@@ -2616,13 +2616,13 @@ define void @bad_gep() {
; CGSCC: if.end:
; CGSCC-NEXT: unreachable
; CGSCC: exit:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1, ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull captures(none) dereferenceable(1) [[N1]]) #[[ATTR21]]
; CGSCC-NEXT: ret void
;
entry:
%n = alloca i8
%m = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.start.p0(ptr %n)
br label %exit
while.body:
@@ -2640,7 +2640,7 @@ if.end:
br i1 %cmp, label %exit, label %while.body
exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr %n)
+ call void @llvm.lifetime.end.p0(ptr %n)
ret void
}
@@ -2679,8 +2679,8 @@ b2:
declare i1 @bad_gep_helper1(ptr, ptr, ptr)
declare void @bad_gep_helper2(i8)
-declare void @llvm.lifetime.start.p0(i64 %0, ptr %1)
-declare void @llvm.lifetime.end.p0(i64 %0, ptr %1)
+declare void @llvm.lifetime.start.p0(ptr %1)
+declare void @llvm.lifetime.end.p0(ptr %1)
;.
; TUNIT: attributes #[[ATTR0]] = { nofree noreturn nosync nounwind }
; TUNIT: attributes #[[ATTR1:[0-9]+]] = { memory(none) }
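The ; CHECK / ; TUNIT / ; CGSCC lines throughout these tests are FileCheck patterns: [[PTR:%.*]] binds a pattern variable on first use and a later [[PTR]] requires the same text again, which is why only the operand lists needed regenerating in these hunks. A minimal sketch of a check pair matching the new call form (the names are illustrative, not taken from the diff):

  ; CHECK: [[PTR:%.*]] = alloca i32, align 4
  ; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]])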
diff --git a/llvm/test/Transforms/Attributor/noalias.ll b/llvm/test/Transforms/Attributor/noalias.ll
index 46d9f77..b7c295a 100644
--- a/llvm/test/Transforms/Attributor/noalias.ll
+++ b/llvm/test/Transforms/Attributor/noalias.ll
@@ -577,31 +577,31 @@ define internal fastcc double @strtox(ptr %s, ptr %p, i32 %prec) unnamed_addr {
; TUNIT-SAME: (ptr [[S:%.*]]) unnamed_addr {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR13:[0-9]+]]
; TUNIT-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; TUNIT-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; TUNIT-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; TUNIT-NEXT: ret double [[CALL1]]
;
; CGSCC-LABEL: define {{[^@]+}}@strtox
; CGSCC-SAME: (ptr [[S:%.*]]) unnamed_addr {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[F:%.*]] = alloca [[STRUCT__IO_FILE:%.*]], align 8
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]]) #[[ATTR14:[0-9]+]]
; CGSCC-NEXT: [[CALL:%.*]] = call i32 @sh_fromstring(ptr noundef nonnull align 8 dereferenceable(240) [[F]], ptr [[S]])
; CGSCC-NEXT: call void @__shlim(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i64 noundef 0)
; CGSCC-NEXT: [[CALL1:%.*]] = call double @__floatscan(ptr noundef nonnull align 8 dereferenceable(240) [[F]], i32 noundef 1, i32 noundef 1)
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 144, ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 8 captures(none) dereferenceable(240) [[F]])
; CGSCC-NEXT: ret double [[CALL1]]
;
entry:
%f = alloca %struct._IO_FILE, align 8
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.start.p0(ptr nonnull %f)
%call = call i32 @sh_fromstring(ptr nonnull %f, ptr %s)
call void @__shlim(ptr nonnull %f, i64 0)
%call1 = call double @__floatscan(ptr nonnull %f, i32 %prec, i32 1)
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %f)
+ call void @llvm.lifetime.end.p0(ptr nonnull %f)
ret double %call1
}
@@ -620,7 +620,7 @@ entry:
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: optsize
declare dso_local i32 @sh_fromstring(...) local_unnamed_addr
@@ -632,7 +632,7 @@ declare dso_local void @__shlim(ptr, i64) local_unnamed_addr
declare dso_local double @__floatscan(ptr, i32, i32) local_unnamed_addr
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Test 15
; propagate noalias to some callsite arguments when there is no potentially reachable capture before them
diff --git a/llvm/test/Transforms/Attributor/openmp_parallel.ll b/llvm/test/Transforms/Attributor/openmp_parallel.ll
index d7b194d..54da16c 100644
--- a/llvm/test/Transforms/Attributor/openmp_parallel.ll
+++ b/llvm/test/Transforms/Attributor/openmp_parallel.ll
@@ -68,13 +68,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
; TUNIT-NEXT: br label [[OMP_PRECOND_THEN:%.*]]
; TUNIT: omp.precond.then:
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 197, ptr [[DOTOMP_UB]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; TUNIT-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; TUNIT-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; TUNIT-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -103,10 +103,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; TUNIT-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; TUNIT: omp.loop.exit:
; TUNIT-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP0]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; TUNIT-NEXT: br label [[OMP_PRECOND_END:%.*]]
; TUNIT: omp.precond.end:
; TUNIT-NEXT: ret void
@@ -124,13 +124,13 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 1
; CGSCC-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
; CGSCC: omp.precond.then:
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]]) #[[ATTR3:[0-9]+]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 [[SUB2]], ptr [[DOTOMP_UB]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]]) #[[ATTR3]]
; CGSCC-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; CGSCC-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; CGSCC-NEXT: call void @__kmpc_for_static_init_4(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]], i32 noundef 34, ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_IS_LAST]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_LB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_UB]], ptr noundef nonnull align 4 dereferenceable(4) [[DOTOMP_STRIDE]], i32 noundef 1, i32 noundef 1)
@@ -159,10 +159,10 @@ define internal void @.omp_outlined.(ptr noalias nocapture readonly %.global_tid
; CGSCC-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY]], label [[OMP_LOOP_EXIT]]
; CGSCC: omp.loop.exit:
; CGSCC-NEXT: call void @__kmpc_for_static_fini(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB1]], i32 [[TMP1]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_IS_LAST]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_STRIDE]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_UB]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[DOTOMP_LB]])
; CGSCC-NEXT: br label [[OMP_PRECOND_END]]
; CGSCC: omp.precond.end:
; CGSCC-NEXT: ret void
@@ -178,13 +178,13 @@ entry:
br i1 %cmp, label %omp.precond.then, label %omp.precond.end
omp.precond.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.lb) #3
store i32 0, ptr %.omp.lb, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.ub) #3
store i32 %sub2, ptr %.omp.ub, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.stride) #3
store i32 1, ptr %.omp.stride, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %.omp.is_last) #3
store i32 0, ptr %.omp.is_last, align 4
%1 = load i32, ptr %.global_tid., align 4
call void @__kmpc_for_static_init_4(ptr nonnull @1, i32 %1, i32 34, ptr nonnull %.omp.is_last, ptr nonnull %.omp.lb, ptr nonnull %.omp.ub, ptr nonnull %.omp.stride, i32 1, i32 1) #3
@@ -216,10 +216,10 @@ omp.inner.for.body: ; preds = %omp.inner.for.body,
omp.loop.exit: ; preds = %omp.inner.for.body, %omp.precond.then
call void @__kmpc_for_static_fini(ptr nonnull @1, i32 %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.is_last) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.stride) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.ub) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %.omp.lb) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.is_last) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.stride) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.ub) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %.omp.lb) #3
br label %omp.precond.end
omp.precond.end: ; preds = %omp.loop.exit, %entry
@@ -227,10 +227,10 @@ omp.precond.end: ; preds = %omp.loop.exit, %ent
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare dso_local void @__kmpc_for_static_init_4(ptr, i32, i32, ptr, ptr, ptr, ptr, i32, i32) local_unnamed_addr
diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
index 472ed30..eb7d78f 100644
--- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
+++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
@@ -201,7 +201,7 @@ declare dso_local void @_ZNSt6vectorIN12_GLOBAL__N_18TestCaseESaIS1_EED2Ev(ptr)
declare dso_local void @_Z11BM_functionRN9benchmark5StateE(ptr dereferenceable(144)) #0
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: alwaysinline uwtable
declare dso_local { i64, ptr } @_ZN9benchmark5State5beginEv(ptr) #6 align 2
@@ -216,7 +216,7 @@ declare dso_local zeroext i1 @_ZNK9benchmark5State13StateIteratorneERKS1_(ptr, p
declare dso_local void @_ZNK9benchmark5State13StateIteratordeEv(ptr) #7 align 2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
; Function Attrs: alwaysinline nounwind uwtable
declare dso_local dereferenceable(16) ptr @_ZN9benchmark5State13StateIteratorppEv(ptr) #7 align 2
diff --git a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
index fa942c9..82bed0f 100644
--- a/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
+++ b/llvm/test/Transforms/Attributor/value-simplify-pointer-info.ll
@@ -116,7 +116,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; TUNIT-SAME: (ptr noalias nofree writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable_or_null(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17:[0-9]+]]
; TUNIT-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3
; TUNIT-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4
; TUNIT-NEXT: [[F3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 5
@@ -136,7 +136,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; TUNIT-NEXT: store i32 4, ptr [[I212]], align 4, !tbaa [[TBAA13:![0-9]+]]
; TUNIT-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2
; TUNIT-NEXT: store i32 4, ptr [[I316]], align 4, !tbaa [[TBAA14:![0-9]+]]
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(argmem: readwrite)
@@ -144,7 +144,7 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly sret([[STRUCT_S:%.*]]) align 4 captures(none) dereferenceable(24) [[AGG_RESULT:%.*]]) #[[ATTR1:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[S:%.*]] = alloca [[STRUCT_S]], align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20:[0-9]+]]
; CGSCC-NEXT: [[F1:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 3
; CGSCC-NEXT: store float 0x3FF19999A0000000, ptr [[F1]], align 4, !tbaa [[TBAA7:![0-9]+]]
; CGSCC-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i64 0, i32 4
@@ -185,12 +185,12 @@ define void @local_alloca_simplifiable_1(ptr noalias sret(%struct.S) align 4 %ag
; CGSCC-NEXT: [[ADD15:%.*]] = add nsw i32 [[I10]], [[I11]]
; CGSCC-NEXT: [[I316:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[AGG_RESULT]], i64 0, i32 2
; CGSCC-NEXT: store i32 [[ADD15]], ptr [[I316]], align 4, !tbaa [[TBAA14]]
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 24, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(24) [[S]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
;
entry:
%s = alloca %struct.S, align 4
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
%f1 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 3
store float 0x3FF19999A0000000, ptr %f1, align 4, !tbaa !7
%f2 = getelementptr inbounds %struct.S, ptr %s, i64 0, i32 4
@@ -231,13 +231,13 @@ entry:
%add15 = add nsw i32 %i10, %i11
%i316 = getelementptr inbounds %struct.S, ptr %agg.result, i64 0, i32 2
store i32 %add15, ptr %i316, align 4, !tbaa !14
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; void local_alloca_simplifiable_2(void) {
; char Bytes[1024];
@@ -260,7 +260,7 @@ define void @local_alloca_simplifiable_2() {
; TUNIT-SAME: () #[[ATTR3:[0-9]+]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
; TUNIT-NEXT: br label [[FOR_COND:%.*]]
; TUNIT: for.cond:
; TUNIT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
@@ -326,7 +326,7 @@ define void @local_alloca_simplifiable_2() {
; TUNIT-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1
; TUNIT-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP20:![0-9]+]]
; TUNIT: for.end38:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
;
; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn
@@ -334,7 +334,7 @@ define void @local_alloca_simplifiable_2() {
; CGSCC-SAME: () #[[ATTR3:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[BYTES:%.*]] = alloca [1024 x i8], align 16
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
; CGSCC-NEXT: br label [[FOR_COND:%.*]]
; CGSCC: for.cond:
; CGSCC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
@@ -406,12 +406,12 @@ define void @local_alloca_simplifiable_2() {
; CGSCC-NEXT: [[INDVARS_IV_NEXT13]] = add nuw nsw i64 [[INDVARS_IV12]], 1
; CGSCC-NEXT: br label [[FOR_COND28]], !llvm.loop [[LOOP23:![0-9]+]]
; CGSCC: for.end38:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 1024, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(1024) [[BYTES]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
;
entry:
%Bytes = alloca [1024 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 1024, ptr nonnull %Bytes)
+ call void @llvm.lifetime.start.p0(ptr nonnull %Bytes)
br label %for.cond
for.cond: ; preds = %for.inc, %entry
@@ -503,7 +503,7 @@ for.inc36: ; preds = %for.body31
br label %for.cond28, !llvm.loop !23
for.end38: ; preds = %for.cond.cleanup30
- call void @llvm.lifetime.end.p0(i64 1024, ptr nonnull %Bytes)
+ call void @llvm.lifetime.end.p0(ptr nonnull %Bytes)
ret void
}
@@ -558,7 +558,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; TUNIT: cond.true:
@@ -566,7 +566,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; TUNIT: cond.false:
; TUNIT-NEXT: br label [[COND_END]]
; TUNIT: cond.end:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: ret i32 5
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
@@ -574,7 +574,7 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; CGSCC: cond.true:
@@ -582,12 +582,12 @@ define i32 @multi_obj_simplifiable_1(i32 %cnd) {
; CGSCC: cond.false:
; CGSCC-NEXT: br label [[COND_END]]
; CGSCC: cond.end:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: ret i32 5
;
entry:
%L = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.start.p0(ptr nonnull %L)
store i32 5, ptr @GI1, align 4, !tbaa !3
store i32 5, ptr %L, align 4, !tbaa !3
%tobool.not = icmp eq i32 %cnd, 0
@@ -602,7 +602,7 @@ cond.false: ; preds = %entry
cond.end: ; preds = %cond.false, %cond.true
%cond = phi ptr [ @GI1, %cond.true ], [ %L, %cond.false ]
%i1 = load i32, ptr %cond, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.end.p0(ptr nonnull %L)
ret i32 %i1
}
@@ -620,7 +620,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; TUNIT-SAME: (i32 [[CND:%.*]]) #[[ATTR3]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[L:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; TUNIT-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; TUNIT: cond.true:
@@ -628,7 +628,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; TUNIT: cond.false:
; TUNIT-NEXT: br label [[COND_END]]
; TUNIT: cond.end:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR17]]
; TUNIT-NEXT: ret i32 5
;
; CGSCC: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
@@ -636,7 +636,7 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; CGSCC-SAME: (i32 [[CND:%.*]]) #[[ATTR5]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[L:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CND]], 0
; CGSCC-NEXT: br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
; CGSCC: cond.true:
@@ -644,12 +644,12 @@ define i32 @multi_obj_simplifiable_2(i32 %cnd) {
; CGSCC: cond.false:
; CGSCC-NEXT: br label [[COND_END]]
; CGSCC: cond.end:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[L]]) #[[ATTR20]]
; CGSCC-NEXT: ret i32 5
;
entry:
%L = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.start.p0(ptr nonnull %L)
%tobool.not = icmp eq i32 %cnd, 0
br i1 %tobool.not, label %cond.false, label %cond.true
@@ -663,7 +663,7 @@ cond.end: ; preds = %cond.false, %cond.t
%cond = phi ptr [ @GI2, %cond.true ], [ %L, %cond.false ]
store i32 5, ptr %cond, align 4, !tbaa !3
%l = load i32, ptr %cond, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %L)
+ call void @llvm.lifetime.end.p0(ptr nonnull %L)
ret i32 %l
}
@@ -1528,8 +1528,8 @@ define i32 @local_alloca_not_simplifiable_1() {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[X:%.*]] = alloca i32, align 4
; TUNIT-NEXT: [[Y:%.*]] = alloca i32, align 4
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]]
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR17]]
; TUNIT-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]])
@@ -1540,16 +1540,16 @@ define i32 @local_alloca_not_simplifiable_1() {
; TUNIT-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; TUNIT-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]]
; TUNIT-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]]
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
; TUNIT-NEXT: ret i32 [[ADD1]]
;
; CGSCC-LABEL: define {{[^@]+}}@local_alloca_not_simplifiable_1() {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[X:%.*]] = alloca i32, align 4
; CGSCC-NEXT: [[Y:%.*]] = alloca i32, align 4
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]]
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]]) #[[ATTR20]]
; CGSCC-NEXT: store i32 1, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: store i32 1, ptr [[X]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: call void @escape(ptr noundef nonnull align 4 dereferenceable(4) [[X]])
@@ -1560,15 +1560,15 @@ define i32 @local_alloca_not_simplifiable_1() {
; CGSCC-NEXT: [[I4:%.*]] = load i32, ptr [[Y]], align 4, !tbaa [[TBAA3]]
; CGSCC-NEXT: [[ADD:%.*]] = add nsw i32 [[I3]], [[I4]]
; CGSCC-NEXT: [[ADD1:%.*]] = add nsw i32 [[ADD]], [[COND]]
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[Y]])
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[X]])
; CGSCC-NEXT: ret i32 [[ADD1]]
;
entry:
%X = alloca i32, align 4
%Y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %X)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %Y)
+ call void @llvm.lifetime.start.p0(ptr nonnull %X)
+ call void @llvm.lifetime.start.p0(ptr nonnull %Y)
store i32 1, ptr %Y, align 4, !tbaa !3
store i32 1, ptr %X, align 4, !tbaa !3
call void @escape(ptr nonnull %X)
@@ -1579,8 +1579,8 @@ entry:
%i4 = load i32, ptr %Y, align 4, !tbaa !3
%add = add nsw i32 %i3, %i4
%add1 = add nsw i32 %add, %cond
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %Y)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %X)
+ call void @llvm.lifetime.end.p0(ptr nonnull %Y)
+ call void @llvm.lifetime.end.p0(ptr nonnull %X)
ret i32 %add1
}
@@ -2755,7 +2755,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; TUNIT-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR1]] {
; TUNIT-NEXT: entry:
; TUNIT-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16
-; TUNIT-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
; TUNIT-NEXT: br label [[FOR_COND:%.*]]
; TUNIT: for.cond:
; TUNIT-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
@@ -2776,7 +2776,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; TUNIT-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128
; TUNIT-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]]
; TUNIT: for.cond.cleanup6:
-; TUNIT-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
+; TUNIT-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR17]]
; TUNIT-NEXT: ret void
; TUNIT: for.body7:
; TUNIT-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64
@@ -2797,7 +2797,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; CGSCC-SAME: (ptr nofree readonly captures(none) [[IN:%.*]], ptr nofree writeonly captures(none) [[OUT:%.*]], i32 [[IDX:%.*]]) #[[ATTR13:[0-9]+]] {
; CGSCC-NEXT: entry:
; CGSCC-NEXT: [[BUF:%.*]] = alloca [128 x i32], align 16
-; CGSCC-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.start.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
; CGSCC-NEXT: br label [[FOR_COND:%.*]]
; CGSCC: for.cond:
; CGSCC-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
@@ -2818,7 +2818,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
; CGSCC-NEXT: [[CMP5:%.*]] = icmp slt i32 [[I3_0]], 128
; CGSCC-NEXT: br i1 [[CMP5]], label [[FOR_BODY7]], label [[FOR_COND_CLEANUP6:%.*]]
; CGSCC: for.cond.cleanup6:
-; CGSCC-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 512, ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
+; CGSCC-NEXT: call void @llvm.lifetime.end.p0(ptr noalias nofree noundef nonnull align 16 captures(none) dereferenceable(512) [[BUF]]) #[[ATTR20]]
; CGSCC-NEXT: ret void
; CGSCC: for.body7:
; CGSCC-NEXT: [[IDXPROM8:%.*]] = sext i32 [[I3_0]] to i64
@@ -2836,7 +2836,7 @@ define hidden void @no_propagation_of_unknown_index_access(ptr %in, ptr %out, i3
;
entry:
%buf = alloca [128 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 512, ptr %buf) #2
+ call void @llvm.lifetime.start.p0(ptr %buf) #2
br label %for.cond
for.cond: ; preds = %for.body, %entry
@@ -2862,7 +2862,7 @@ for.cond4: ; preds = %for.body7, %for.con
br i1 %cmp5, label %for.body7, label %for.cond.cleanup6
for.cond.cleanup6: ; preds = %for.cond4
- call void @llvm.lifetime.end.p0(i64 512, ptr %buf) #2
+ call void @llvm.lifetime.end.p0(ptr %buf) #2
ret void
for.body7: ; preds = %for.cond4
diff --git a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
index b932a7d..09abf1f 100644
--- a/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
+++ b/llvm/test/Transforms/CodeExtractor/LoopExtractor_alloca.ll
@@ -30,16 +30,16 @@ entry:
br label %loop1
loop1:
- call void @llvm.lifetime.start.p0(i64 4, ptr %v1)
+ call void @llvm.lifetime.start.p0(ptr %v1)
%r1 = call i32 @foo(ptr %v1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %v1)
+ call void @llvm.lifetime.end.p0(ptr %v1)
%cmp1 = icmp ne i32 %r1, 0
br i1 %cmp1, label %loop1, label %loop2
loop2:
- call void @llvm.lifetime.start.p0(i64 4, ptr %v2)
+ call void @llvm.lifetime.start.p0(ptr %v2)
%r2 = call i32 @foo(ptr %v2)
- call void @llvm.lifetime.end.p0(i64 4, ptr %v2)
+ call void @llvm.lifetime.end.p0(ptr %v2)
%cmp2 = icmp ne i32 %r2, 0
br i1 %cmp2, label %loop2, label %exit
@@ -49,6 +49,6 @@ exit:
declare i32 @foo(ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
index 9cdc37a..a24bb74 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca.ll
@@ -20,11 +20,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr %tmp, align 4, !tbaa !2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb
ret i32 %tmp7
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
index f4a37e7..22c0baf 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca2.ll
@@ -18,10 +18,10 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -30,14 +30,14 @@ bb6: ; preds = %bb5, %bb
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
index bd51910..8b9c5dd 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca4.ll
@@ -7,7 +7,7 @@
@g = external local_unnamed_addr global i32, align 4
; CHECK-LABEL: define{{.*}}@caller(
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %tmp.i)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %tmp.i)
; CHECK-NEXT: call void @callee_unknown_use1.{{.*}}(ptr %tmp.i
define i32 @callee_unknown_use1(i32 %arg) local_unnamed_addr #0 {
@@ -21,11 +21,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
%tmp11 = bitcast ptr %tmp to ptr
call void @bar(ptr nonnull %tmp11) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -36,14 +36,14 @@ bb6: ; preds = %bb5, %bb
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
index 54782c5..10be1c8 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineAlloca5.ll
@@ -18,11 +18,11 @@ bb:
br i1 %tmp4, label %bb6, label %bb5
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #2
store i32 %tmp3, ptr %tmp, align 4, !tbaa !2
store i32 %tmp3, ptr @g, align 4, !tbaa !2
call void @bar(ptr nonnull %tmp) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #2
br label %bb6
bb6: ; preds = %bb5, %bb
@@ -32,14 +32,14 @@ bb6: ; preds = %bb5, %bb
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @bar(ptr) local_unnamed_addr #2
declare void @bar2(ptr, ptr) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @caller(i32 %arg) local_unnamed_addr #0 {
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
index bdf9e23..5e0ce20 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineInvokeProducesOutVal.ll
@@ -26,10 +26,10 @@ bb5: ; preds = %bb4, %bb1, %bb
; CHECK-LABEL: bb:
; CHECK-NEXT: [[CALL26LOC:%.*]] = alloca ptr
; CHECK-LABEL: codeRepl.i:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[CALL26LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[CALL26LOC]])
; CHECK-NEXT: call void @bar.1.bb1(ptr [[CALL26LOC]])
; CHECK-NEXT: %call26.reload.i = load ptr, ptr [[CALL26LOC]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[CALL26LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[CALL26LOC]])
define ptr @dummy_caller(i32 %arg) {
bb:
%tmp = tail call ptr @bar(i32 %arg)
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink.ll b/llvm/test/Transforms/CodeExtractor/live_shrink.ll
index f5debc503..43cc248 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink.ll
@@ -8,7 +8,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb5
@@ -18,17 +18,17 @@ bb4: ; preds = %bb
br label %bb5
bb5: ; preds = %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
@@ -49,8 +49,8 @@ bb:
; CHECK-LABEL: define internal void @_Z3foov.1.
; CHECK: newFuncRoot:
; CHECK-NEXT: %tmp = alloca %class.A
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
; CHECK-NEXT: br label %bb5.exitStub
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
index e9d5fb6..ef815ad 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_gep.ll
@@ -9,7 +9,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb5
@@ -19,17 +19,17 @@ bb4: ; preds = %bb
br label %bb5
bb5: ; preds = %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
index 6f63bca..7074854 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_hoist.ll
@@ -9,7 +9,7 @@
define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
%tmp2 = load i32, ptr @cond, align 4, !tbaa !2
%tmp3 = icmp eq i32 %tmp2, 0
br i1 %tmp3, label %bb4, label %bb9
@@ -29,17 +29,17 @@ bb8: ; preds = %bb4
br label %bb9
bb9: ; preds = %bb8, %bb4, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
@@ -50,7 +50,7 @@ bb:
; CHECK-LABEL: define internal void @_Z3foov.1.
; CHECK: bb9:
-; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+; CHECK: call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
; CHECK: br label %.exitStub
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
index 2512ac9..1d0af23 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_multiple.ll
@@ -8,8 +8,8 @@ define void @_Z3foov() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
%tmp1 = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp1)
%tmp4 = load i32, ptr @cond, align 4, !tbaa !2
%tmp5 = icmp eq i32 %tmp4, 0
br i1 %tmp5, label %bb6, label %bb7
@@ -19,18 +19,18 @@ bb6: ; preds = %bb
br label %bb7
bb7: ; preds = %bb6, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: uwtable
define void @_Z3goov() local_unnamed_addr {
diff --git a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
index 7942418..c5bd626 100644
--- a/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
+++ b/llvm/test/Transforms/CodeExtractor/live_shrink_unsafe.ll
@@ -14,8 +14,8 @@ define void @_Z3foo_unknown_mem_accessv() local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
%tmp1 = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp1)
%tmp4 = load ptr, ptr @condptr, align 8, !tbaa !2
%tmp5 = load i32, ptr %tmp4, align 4, !tbaa !6
%tmp6 = icmp eq i32 %tmp5, 0
@@ -26,20 +26,20 @@ bb7: ; preds = %bb
br label %bb8
bb8: ; preds = %bb7, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
declare void @_Z3barv() local_unnamed_addr
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @_ZN1A7memfuncEv(ptr) local_unnamed_addr
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z3foo_unknown_calli(i32 %arg) local_unnamed_addr {
bb:
%tmp = alloca %class.A, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp)
tail call void @_Z3barv()
%tmp2 = icmp eq i32 %arg, 0
br i1 %tmp2, label %bb3, label %bb4
@@ -49,7 +49,7 @@ bb3: ; preds = %bb
br label %bb4
bb4: ; preds = %bb3, %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp)
ret void
}
diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
index 3f113e6..d2b79ab 100644
--- a/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/ARM/tailcall-dup.ll
@@ -4,8 +4,8 @@ target triple = "armv8m.main-none-eabi"
declare ptr @f0()
declare ptr @f1()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
define ptr @tail_dup() {
; CHECK-LABEL: tail_dup
@@ -15,7 +15,7 @@ define ptr @tail_dup() {
; CHECK-NEXT: ret ptr
bb0:
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
%tmp0 = tail call ptr @f0()
br label %return
bb1:
@@ -23,7 +23,7 @@ bb1:
br label %return
return:
%retval = phi ptr [ %tmp0, %bb0 ], [ %tmp1, %bb1 ]
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
ret ptr %retval
}
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
index dd47d5e..f72756d 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/tailcall-assume-xbb.ll
@@ -14,7 +14,7 @@
define ptr @foo(i64 %size, i64 %v1, i64 %v2) {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
%cmp1 = icmp ult i64 %size, 1025
br i1 %cmp1, label %if.end, label %case1
@@ -42,12 +42,12 @@ exit1:
exit2:
%retval2 = phi ptr [ %ret1, %case1 ], [ %retval1, %exit1 ]
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
ret ptr %retval2
}
declare void @llvm.assume(i1)
declare ptr @qux()
declare ptr @bar()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
index 6bf268b..6997c9f 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
@@ -65,25 +65,25 @@ define void @test_free_intrinsics(i64 %x) {
; CHECK-LABEL: @test_free_intrinsics(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000032, ptr [[PTR]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100000000064, ptr [[PTR]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100000000128, ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR]])
; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr [[PTR]])
; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[I]], i64 100000000256, ptr [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%ptr = alloca i8
- call void @llvm.lifetime.start.p0(i64 100000000032, ptr %ptr)
- call void @llvm.lifetime.start.p0(i64 100000000064, ptr %ptr)
- call void @llvm.lifetime.end.p0(i64 100000000128, ptr %ptr)
+ call void @llvm.lifetime.start.p0(ptr %ptr)
+ call void @llvm.lifetime.start.p0(ptr %ptr)
+ call void @llvm.lifetime.end.p0(ptr %ptr)
%i = call ptr @llvm.invariant.start.p0(i64 100000000256, ptr %ptr)
call void @llvm.invariant.end.p0(ptr %i, i64 100000000256, ptr %ptr)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare ptr @llvm.invariant.start.p0(i64, ptr nocapture)
declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
index 89149ce..bf75196 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-06.ll
@@ -17,11 +17,11 @@ entry:
tricky:
%2 = call ptr @await_suspend()
store ptr %2, ptr %0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %1)
+ call void @llvm.lifetime.start.p0(ptr %1)
store ptr %0, ptr %1, align 8
%3 = load ptr, ptr %1, align 8
%4 = load ptr, ptr %3, align 8
- call void @llvm.lifetime.end.p0(i64 8, ptr %1)
+ call void @llvm.lifetime.end.p0(ptr %1)
br label %finish
finish:
@@ -49,9 +49,9 @@ suspend:
; CHECK: [[TMP2:%.*]] = call ptr @await_suspend()
; CHECK-NEXT: store ptr [[TMP2]], ptr [[TMP0]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP1]])
; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[TMP1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP1]])
;
declare ptr @llvm.coro.free(token, ptr)
@@ -65,8 +65,8 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend()
declare void @print(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
index 3b0acdd..8bfb8cf 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-07.ll
@@ -13,11 +13,11 @@ entry:
br i1 %n, label %flag_true, label %flag_false
flag_true:
- call void @llvm.lifetime.start.p0(i64 8, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
br label %merge
flag_false:
- call void @llvm.lifetime.start.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %merge
merge:
@@ -51,7 +51,7 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @print(ptr)
declare noalias ptr @malloc(i32)
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
index 5a14a0e..80be62a 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-08.ll
@@ -18,9 +18,9 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -53,9 +53,9 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %exit
exit:
@@ -76,5 +76,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll b/llvm/test/Transforms/Coroutines/coro-alloca-09.ll
deleted file mode 100644
index 5c60c5b..0000000
--- a/llvm/test/Transforms/Coroutines/coro-alloca-09.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s
-
-%"struct.std::coroutine_handle" = type { ptr }
-%"struct.std::coroutine_handle.0" = type { %"struct.std::coroutine_handle" }
-%"struct.lean_future<int>::Awaiter" = type { i32, %"struct.std::coroutine_handle.0" }
-
-declare ptr @malloc(i64)
-
-%i8.array = type { [100 x i8] }
-declare void @consume.i8(ptr)
-
-; The testval lives across the suspend point, so it should be put on the frame.
-; However, part of %testval has a lifetime marker which indicates that part
-; wouldn't live across the suspend point.
-; This tests whether %testval would be put on the frame when the partial
-; lifetime markers are ignored.
-define void @foo(ptr %to_store) presplitcoroutine {
-entry:
- %testval = alloca %i8.array
- %subrange = getelementptr inbounds %i8.array, ptr %testval, i64 0, i32 0, i64 50
- %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
- %alloc = call ptr @malloc(i64 16) #3
- %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
-
- call void @llvm.lifetime.start.p0(i64 50, ptr %subrange)
- call void @consume.i8(ptr %subrange)
- call void @llvm.lifetime.end.p0(i64 50, ptr %subrange)
- store ptr %testval, ptr %to_store
-
- %save = call token @llvm.coro.save(ptr null)
- %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
- switch i8 %suspend, label %exit [
- i8 0, label %await.ready
- i8 1, label %exit
- ]
-await.ready:
- %StrayCoroSave = call token @llvm.coro.save(ptr null)
- br label %exit
-exit:
- call i1 @llvm.coro.end(ptr null, i1 false, token none)
- ret void
-}
-
-; Verify that for both foo and bar, testval isn't put on the frame.
-; CHECK: %foo.Frame = type { ptr, ptr, %i8.array, i1 }
-
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
-declare i1 @llvm.coro.alloc(token) #3
-declare i64 @llvm.coro.size.i64() #5
-declare ptr @llvm.coro.begin(token, ptr writeonly) #3
-declare token @llvm.coro.save(ptr) #3
-declare ptr @llvm.coro.frame() #5
-declare i8 @llvm.coro.suspend(token, i1) #3
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
-declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
index f828b22..8b8dbac 100644
--- a/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
+++ b/llvm/test/Transforms/Coroutines/coro-alloca-loop-carried-address.ll
@@ -50,7 +50,7 @@ entry:
br label %loop
loop:
- call void @llvm.lifetime.start(i64 8, ptr %stackvar0)
+ call void @llvm.lifetime.start(ptr %stackvar0)
store i64 1234, ptr %stackvar0
@@ -58,7 +58,7 @@ loop:
; %stackvar1 and rely on it staying the same across suspension.
call void @bar()
- call void @llvm.lifetime.end(i64 8, ptr %stackvar0)
+ call void @llvm.lifetime.end(ptr %stackvar0)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -81,5 +81,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly)
declare token @llvm.coro.save(ptr)
declare i8 @llvm.coro.suspend(token, i1)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
index 07b3bd8..d662638 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-infinite-loop-bug.ll
@@ -49,7 +49,7 @@ entry:
%id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
ptr @my_async_function_fp)
%hdl = call ptr @llvm.coro.begin(token %id, ptr null)
- call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.start.p0(ptr %escaped_addr)
call void @escape(ptr %escaped_addr)
br label %callblock
@@ -80,6 +80,6 @@ declare void @llvm.coro.async.context.dealloc(ptr)
declare swiftcc void @asyncSuspend(ptr)
declare ptr @llvm.coro.async.resume()
declare void @llvm.coro.async.size.replace(ptr, ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
index 4010159..49c4207 100644
--- a/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-async-addr-lifetime-start-bug.ll
@@ -43,7 +43,7 @@ entry:
%id = call token @llvm.coro.id.async(i32 128, i32 16, i32 0,
ptr @my_async_function_fp)
%hdl = call ptr @llvm.coro.begin(token %id, ptr null)
- call void @llvm.lifetime.start.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.start.p0(ptr %escaped_addr)
br label %callblock
@@ -81,7 +81,7 @@ loop:
br label %callblock
loop_exit:
- call void @llvm.lifetime.end.p0(i64 4, ptr %escaped_addr)
+ call void @llvm.lifetime.end.p0(ptr %escaped_addr)
call i1 (ptr, i1, ...) @llvm.coro.end.async(ptr %hdl, i1 false)
unreachable
}
@@ -104,6 +104,6 @@ declare void @llvm.coro.async.context.dealloc(ptr)
declare swiftcc void @asyncSuspend(ptr)
declare ptr @llvm.coro.async.resume()
declare void @llvm.coro.async.size.replace(ptr, ptr)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/Coroutines/coro-byval-param.ll b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
index 4705918..38ab5ac 100644
--- a/llvm/test/Transforms/Coroutines/coro-byval-param.ll
+++ b/llvm/test/Transforms/Coroutines/coro-byval-param.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
coro.init: ; preds = %coro.alloc, %entry
%3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
%4 = call ptr @llvm.coro.begin(token %0, ptr %3) #10
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #2
%call2 = call ptr @_ZN4task12promise_type17get_return_objectEv(ptr nonnull dereferenceable(1) %__promise)
call void @initial_suspend(ptr nonnull dereferenceable(1) %__promise)
%5 = call token @llvm.coro.save(ptr null)
@@ -31,9 +31,9 @@ coro.init: ; preds = %coro.alloc, %entry
]
init.ready: ; preds = %coro.init
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %a2) #2
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a2, ptr align 8 %a1, i64 24, i1 false)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %a2) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %a2) #2
call void @_ZN4task12promise_type13final_suspendEv(ptr nonnull dereferenceable(1) %__promise) #2
%7 = call token @llvm.coro.save(ptr null)
call fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr %4) #2
@@ -42,7 +42,7 @@ init.ready: ; preds = %coro.init
br i1 %switch, label %cleanup33, label %coro.ret
cleanup33: ; preds = %init.ready, %coro.init
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %__promise) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #2
%9 = call ptr @llvm.coro.free(token %0, ptr %4)
%.not = icmp eq ptr %9, null
br i1 %.not, label %coro.ret, label %coro.free
@@ -75,7 +75,7 @@ declare i64 @llvm.coro.size.i64() #4
declare ptr @llvm.coro.begin(token, ptr writeonly) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #6
@@ -93,7 +93,7 @@ declare token @llvm.coro.save(ptr) #2
declare hidden fastcc void @_ZNSt12experimental13coroutines_v116coroutine_handleIN4task12promise_typeEE12from_addressEPv(ptr) unnamed_addr #7 align 2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
; Function Attrs: nounwind
declare i8 @llvm.coro.suspend(token, i1) #2
diff --git a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
index 6c6e5a6..d369a21 100644
--- a/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
+++ b/llvm/test/Transforms/Coroutines/coro-elide-musttail.ll
@@ -48,7 +48,7 @@ entry:
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #1
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
index c9700c8..bf08d6f 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-00.ll
@@ -16,33 +16,33 @@ entry:
br i1 %cond, label %then, label %else
then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
call void @consume(ptr %data)
%suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
i8 1, label %cleanup1]
resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup1
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup
else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
call void @consume(ptr %data2)
%suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
i8 1, label %cleanup2]
resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup2
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup
cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
declare double @print(double)
declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
index 584caa35..78c6f0c 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-01.ll
@@ -17,10 +17,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -29,10 +29,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -41,13 +41,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -69,5 +69,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
index f916ebb..8265731 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-02.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
index 525df87..66d4137 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-03.ll
@@ -16,33 +16,33 @@ entry:
br i1 %cond, label %then, label %else
then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data)
call void @consume(ptr %data)
%suspend.value = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value, label %coro.ret [i8 0, label %resume
i8 1, label %cleanup1]
resume:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup1
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data)
br label %cleanup
else:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %data2)
call void @consume(ptr %data2)
%suspend.value2 = call i8 @llvm.coro.suspend(token none, i1 false)
switch i8 %suspend.value2, label %coro.ret [i8 0, label %resume2
i8 1, label %cleanup2]
resume2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup2
cleanup2:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %data2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %data2)
br label %cleanup
cleanup:
@@ -72,5 +72,5 @@ declare noalias ptr @malloc(i32)
declare double @print(double)
declare void @free(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
index 27e0c47..6ff31e5 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-04.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 300, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
index 6d93eea..c3da8e8 100644
--- a/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
+++ b/llvm/test/Transforms/Coroutines/coro-frame-reuse-alloca-05.ll
@@ -19,10 +19,10 @@ entry:
br label %init.ready
init.ready:
%1 = call noalias nonnull ptr @llvm.coro.begin(token %0, ptr null)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %__promise)
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise)
br i1 %cond, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.start.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @consume(ptr nonnull %a)
%save = call token @llvm.coro.save(ptr null)
%suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
@@ -31,10 +31,10 @@ if.then:
i8 1, label %cleanup1
]
await.ready:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup1
if.else:
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
call void @consume.2(ptr nonnull %b)
%save2 = call token @llvm.coro.save(ptr null)
%suspend2 = call i8 @llvm.coro.suspend(token %save2, i1 false)
@@ -43,13 +43,13 @@ if.else:
i8 1, label %cleanup2
]
await2.ready:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup2
cleanup1:
- call void @llvm.lifetime.end.p0(i64 500, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
br label %cleanup
cleanup2:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
br label %cleanup
cleanup:
call ptr @llvm.coro.free(token %0, ptr %1)
@@ -70,5 +70,5 @@ declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
index 8d0e772..df2ed7e 100644
--- a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
+++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
@@ -31,7 +31,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
@@ -68,7 +68,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%save = call token @llvm.coro.save(ptr null)
@@ -81,7 +81,7 @@ await.ready:
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
ret void
}
@@ -107,7 +107,7 @@ entry:
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
call void @consume.i8.array(ptr %testval)
%0 = load i8, ptr @testbool, align 1
@@ -115,7 +115,7 @@ entry:
br i1 %tobool, label %if.then, label %if.end
if.then:
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %if.end
if.end:
@@ -138,5 +138,5 @@ declare ptr @llvm.coro.begin(token, ptr writeonly) #3
declare ptr @llvm.coro.frame() #5
declare i8 @llvm.coro.suspend(token, i1) #3
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
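Every hunk in this series applies the same mechanical rewrite: the leading i64 size operand is dropped from llvm.lifetime.start/llvm.lifetime.end at both call sites and declarations, leaving the pointer as the only argument. A minimal before/after sketch of the shape (illustrative IR, not taken from any one test above; under the pointer-only form the marker covers the whole alloca, so no explicit size is carried):

    declare void @llvm.lifetime.start.p0(ptr nocapture)
    declare void @llvm.lifetime.end.p0(ptr nocapture)

    define void @sketch() {
      %p = alloca i64
      ; was: call void @llvm.lifetime.start.p0(i64 8, ptr %p)
      call void @llvm.lifetime.start.p0(ptr %p)
      store i64 0, ptr %p
      ; was: call void @llvm.lifetime.end.p0(i64 8, ptr %p)
      call void @llvm.lifetime.end.p0(ptr %p)
      ret void
    }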
diff --git a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
index 3f0899a..c3d0fb1 100644
--- a/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
+++ b/llvm/test/Transforms/Coroutines/coro-only-destroy-when-complete.ll
@@ -19,7 +19,7 @@ coro.alloc: ; preds = %entry
init.suspend: ; preds = %entry, %coro.alloc
%3 = phi ptr [ null, %entry ], [ %call, %coro.alloc ]
%4 = call ptr @llvm.coro.begin(token %0, ptr %3) #12
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %__promise) #3
store ptr null, ptr %__promise, align 8
%5 = call token @llvm.coro.save(ptr null)
%6 = call i8 @llvm.coro.suspend(token %5, i1 false)
@@ -80,7 +80,7 @@ cleanup3:
br label %cleanup62
cleanup62: ; preds = %await2.suspend, %await.suspend, %init.suspend, %final.suspend
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %__promise) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %__promise) #3
%18 = call ptr @llvm.coro.free(token %0, ptr %4)
%.not = icmp eq ptr %18, null
br i1 %.not, label %coro.ret, label %coro.free
@@ -99,9 +99,9 @@ declare i1 @llvm.coro.alloc(token) #3
declare dso_local noundef nonnull ptr @_Znwm(i64 noundef) local_unnamed_addr #4
declare i64 @llvm.coro.size.i64() #5
declare ptr @llvm.coro.begin(token, ptr writeonly) #3
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
declare token @llvm.coro.save(ptr) #7
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
declare i8 @llvm.coro.suspend(token, i1) #3
declare ptr @_Z5Innerv() local_unnamed_addr
declare dso_local void @_ZdlPv(ptr noundef) local_unnamed_addr #8
diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
index 8ed0384..31839aa 100644
--- a/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
+++ b/llvm/test/Transforms/Coroutines/coro-retcon-unreachable.ll
@@ -37,9 +37,9 @@ declare token @llvm.coro.id.retcon.once(i32, i32, ptr, ptr, ptr, ptr) #5
declare ptr @llvm.coro.begin(token, ptr writeonly) #5
declare token @llvm.coro.alloca.alloc.i64(i64, i32) #5
declare ptr @llvm.coro.alloca.get(token) #5
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.start.p0(ptr nocapture) #6
declare i1 @llvm.coro.suspend.retcon.i1(...) #5
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #6
+declare void @llvm.lifetime.end.p0(ptr nocapture) #6
declare void @llvm.coro.alloca.free(token) #5
declare i1 @llvm.coro.end(ptr, i1, token) #5
diff --git a/llvm/test/Transforms/Coroutines/coro-split-02.ll b/llvm/test/Transforms/Coroutines/coro-split-02.ll
index 31e8e81..c487ab1 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-02.ll
@@ -27,10 +27,10 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -42,10 +42,10 @@ exit:
; CHECK: %testval = alloca i32
; CHECK-NOT: call token @llvm.coro.save(ptr null)
; CHECK: %val = load i32, ptr %ref.tmp7
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %test = load i32, ptr %testval
; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -61,5 +61,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
index 184d4a5..9a9e3c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -43,7 +43,7 @@ coro_Suspend: ; preds = %for.cond, %if.then,
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
; Function Attrs: argmemonly nounwind readonly
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5
@@ -54,7 +54,7 @@ declare i64 @llvm.coro.size.i64() #1
declare ptr @llvm.coro.begin(token, ptr writeonly) #7
declare token @llvm.coro.save(ptr) #7
declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5
declare void @free(ptr nocapture) local_unnamed_addr #6
declare i1 @llvm.coro.end(ptr, i1, token) #7
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
index e2ed205..e661932 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail-chain-pgo-counter-promo.ll
@@ -33,8 +33,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
declare i8 @llvm.coro.suspend(token, i1)
declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
declare void @llvm.instrprof.value.profile(ptr, i64, i64, i32, i32)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: noinline nounwind presplitcoroutine uwtable
define ptr @f(i32 %0) presplitcoroutine align 32 {
@@ -56,11 +56,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
12: ; preds = %8, %1
%13 = phi ptr [ null, %1 ], [ %11, %8 ]
%14 = call ptr @llvm.coro.begin(token %6, ptr %13) #28
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %3) #9
store ptr null, ptr %3, align 16
%15 = getelementptr inbounds {ptr, i64}, ptr %3, i64 0, i32 1
store i64 0, ptr %15, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %4) #9
store ptr %3, ptr %4, align 8
%16 = call token @llvm.coro.save(ptr null)
call void @await_suspend(ptr noundef nonnull align 1 dereferenceable(1) %4, ptr %14) #9
@@ -71,7 +71,7 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
]
18: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
%19 = icmp slt i32 0, %0
br i1 %19, label %20, label %36
@@ -79,12 +79,12 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
br label %22
21: ; preds = %12
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #9
br label %54
22: ; preds = %20, %31
%23 = phi i32 [ 0, %20 ], [ %32, %31 ]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %5) #9
%24 = call ptr @other_coro()
store ptr %3, ptr %5, align 8
%25 = getelementptr inbounds { ptr, ptr }, ptr %5, i64 0, i32 1
@@ -98,13 +98,13 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
]
31: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
%32 = add nuw nsw i32 %23, 1
%33 = icmp slt i32 %32, %0
br i1 %33, label %22, label %35, !llvm.loop !0
34: ; preds = %22
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %5) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #9
br label %54
35: ; preds = %31
@@ -142,11 +142,11 @@ define ptr @f(i32 %0) presplitcoroutine align 32 {
br label %54
53: ; preds = %47
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %2) #9
+ call void @llvm.lifetime.start.p0(ptr nonnull %2) #9
unreachable
54: ; preds = %52, %34, %21
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3) #9
+ call void @llvm.lifetime.end.p0(ptr nonnull %3) #9
%55 = call ptr @llvm.coro.free(token %6, ptr %14)
%56 = icmp eq ptr %55, null
br i1 %56, label %61, label %57
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
index 7c1a13f..b256175 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail5.ll
@@ -9,7 +9,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -29,7 +29,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -53,8 +53,8 @@ declare i1 @llvm.coro.end(ptr, i1, token) #2
declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
index e05169a..99174ff 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail6.ll
@@ -13,7 +13,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -33,7 +33,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
call i1 @llvm.coro.end(ptr null, i1 false, token none)
@@ -51,7 +51,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -71,7 +71,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
cleanup:
@@ -106,8 +106,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @delete(ptr nonnull) #2
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
index 8ceb0dd..91f8543 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-musttail7.ll
@@ -11,7 +11,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -36,7 +36,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
exit:
%result = phi i64 [0, %entry], [0, %entry], [%foo, %await.suspend], [%foo, %await.suspend], [%foo, %await.ready]
@@ -57,7 +57,7 @@ entry:
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%alloc.var = alloca i64
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.start.p0(ptr %alloc.var)
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
%save = call token @llvm.coro.save(ptr null)
@@ -77,7 +77,7 @@ await.suspend:
]
await.ready:
call void @consume(ptr %alloc.var)
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc.var)
+ call void @llvm.lifetime.end.p0(ptr %alloc.var)
br label %exit
cleanup:
@@ -114,8 +114,8 @@ declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #1
declare ptr @malloc(i64)
declare void @delete(ptr nonnull) #2
declare void @consume(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @await_suspend_function(ptr %awaiter, ptr %hdl)
attributes #0 = { presplitcoroutine }
diff --git a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
index 157807d..12d6564 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-no-lifetime.ll
@@ -12,11 +12,11 @@ entry:
br i1 %n, label %flag_true, label %flag_false
flag_true:
- call void @llvm.lifetime.start.p0(i64 8, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
br label %merge
flag_false:
- call void @llvm.lifetime.start.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
br label %merge
merge:
@@ -27,8 +27,8 @@ merge:
i8 1, label %cleanup]
resume:
call void @print(ptr %phi)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x)
- call void @llvm.lifetime.end.p0(i64 8, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %cleanup
cleanup:
@@ -54,8 +54,8 @@ declare i1 @llvm.coro.alloc(token)
declare ptr @llvm.coro.begin(token, ptr)
declare i1 @llvm.coro.end(ptr, i1, token)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @print(ptr)
declare noalias ptr @malloc(i32)
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
index 1d0cf94..a5a2bcf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-01.ll
@@ -17,7 +17,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -33,7 +33,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -43,11 +43,11 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %testval = alloca i32, align 4
-; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: %test = load i32, ptr %testval
; CHECK-NEXT: call void @print(i32 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -56,7 +56,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -72,7 +72,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -92,5 +92,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
index 38a2a33..abc91c3 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-02.ll
@@ -15,7 +15,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i32
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -44,7 +44,7 @@ await.ready:
after.await:
%test1 = load i32, ptr %testval
call void @print(i32 %test1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
br label %exit
exit:
@@ -54,7 +54,7 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %[[VAL:testval.+]] = getelementptr inbounds %a.Frame
-; CHECK-NOT: call void @llvm.lifetime.start.p0(i64 4, ptr %{{.*}})
+; CHECK-NOT: call void @llvm.lifetime.start.p0(ptr %{{.*}})
; CHECK: %test = load i32, ptr %[[VAL]]
declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
@@ -69,5 +69,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
index de377a6..efd1adf 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-03.ll
@@ -17,7 +17,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca %i8.array
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -32,7 +32,7 @@ await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
call void @consume.i8.array(ptr %testval)
- call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -41,10 +41,10 @@ exit:
}
; CHECK-LABEL: @a.gep.resume(
; CHECK: %testval = alloca %i8.array
-; CHECK: call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: call void @consume.i8.array(ptr %testval)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -60,5 +60,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
index 8210455..af5aa8a 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-sink-lifetime-04.ll
@@ -15,7 +15,7 @@ entry:
%ref.tmp7 = alloca %"struct.lean_future<int>::Awaiter", align 8
%testval = alloca i8
; lifetime of %testval starts here, but not used until await.ready.
- call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
%alloc = call ptr @malloc(i64 16) #3
%vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
@@ -31,7 +31,7 @@ await.ready:
%val = load i32, ptr %ref.tmp7
%test = load i8, ptr %testval
call void @consume.i8(i8 %test)
- call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -41,11 +41,11 @@ exit:
; CHECK-LABEL: @a.resume(
; CHECK: %testval = alloca i8, align 1
-; CHECK: call void @llvm.lifetime.start.p0(i64 1, ptr %testval)
+; CHECK: call void @llvm.lifetime.start.p0(ptr %testval)
; CHECK-NEXT: %val = load i32, ptr %ref.tmp7
; CHECK-NEXT: %test = load i8, ptr %testval
; CHECK-NEXT: call void @consume.i8(i8 %test)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr %testval)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %testval)
; CHECK-NEXT: call void @print(i32 %val)
; CHECK-NEXT: ret void
@@ -62,5 +62,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1, token) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
index d2c4f57..4eec7ed 100644
--- a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
+++ b/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
@@ -61,11 +61,11 @@ entry:
ret ptr %task
; CHECK: %[[TASK:.+]] = alloca %struct.Task, align 8
; CHECK-NEXT: %[[FRAME:.+]] = alloca [32 x i8], align 8
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %[[TASK]])
+ ; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %[[TASK]])
; CHECK-NEXT: %[[ID:.+]] = call token @llvm.coro.id(i32 0, ptr null, ptr @callee, ptr @callee.resumers)
; CHECK-NEXT: %[[HDL:.+]] = call ptr @llvm.coro.begin(token %[[ID]], ptr null)
; CHECK-NEXT: store ptr %[[HDL]], ptr %[[TASK]], align 8
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %[[TASK]])
+ ; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %[[TASK]])
; CHECK-NEXT: ret ptr %[[TASK]]
}
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
index 9e47bd2..390e96e 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll
@@ -12,14 +12,14 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = private unnamed_addr constant [8 x i8] c"a = %l\0A\00", align 1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @hoo(ptr)
declare i32 @printf(ptr nocapture readonly, ...)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @goo(i32 %N, ptr %b) {
entry:
@@ -32,12 +32,12 @@ for.cond: ; preds = %for.body, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
call void @hoo(ptr %a.i)
call void @hoo(ptr %b)
%tmp1 = load volatile i64, ptr %a.i, align 8
%call.i = call i32 (ptr, ...) @printf(ptr @.str, i64 %tmp1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Transforms/DCE/basic.ll b/llvm/test/Transforms/DCE/basic.ll
index 1a3b12e..28772f0 100644
--- a/llvm/test/Transforms/DCE/basic.ll
+++ b/llvm/test/Transforms/DCE/basic.ll
@@ -10,8 +10,8 @@ define void @test() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
; CHECK-LABEL: @test_lifetime_alloca
define i32 @test_lifetime_alloca() {
@@ -21,8 +21,8 @@ define i32 @test_lifetime_alloca() {
; CHECK-NOT: llvm.lifetime.start
; CHECK-NOT: llvm.lifetime.end
%i = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret i32 0
}
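The DCE test above (and DeadStoreElimination/dominate.ll further down) used the i64 -1 sentinel, which the old interface reserved for objects of unknown or variable size. With the size operand gone that distinction disappears: the sentinel and a concrete size both lower to the identical pointer-only call, as in this sketch (illustrative, not part of the patch):

    %i = alloca i8, align 4
    call void @llvm.lifetime.start.p0(ptr %i)  ; was (i64 -1, ptr %i)
    call void @llvm.lifetime.end.p0(ptr %i)    ; was (i64 -1, ptr %i)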
diff --git a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
index ee9bd69..4ec69bc 100644
--- a/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/batchaa-caching-new-pointers.ll
@@ -12,17 +12,17 @@ define ptr @foo(ptr noundef %ptr) {
; CHECK-LABEL: define ptr @foo(
; CHECK-SAME: ptr noundef [[PTR:%.*]]) {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6:[0-9]+]]
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 [[TMP1]], i8 42, i64 4, i1 false)
; CHECK-NEXT: store i32 43, ptr [[STRUCT_BYTE_8]], align 4
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
; Set %struct.alloca[8, 16) to 42.
call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 %struct.byte.8, i8 42, i64 8, i1 false)
@@ -33,7 +33,7 @@ define ptr @foo(ptr noundef %ptr) {
store i32 44, ptr %struct.byte.4, align 4
; Return %struct.alloca[8, 16).
%ret = load ptr, ptr %struct.byte.8
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
@@ -44,7 +44,7 @@ define ptr @foo(ptr noundef %ptr) {
define ptr @foo_with_removable_malloc() {
; CHECK-LABEL: define ptr @foo_with_removable_malloc() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_BYTE_8]], i64 4
@@ -53,11 +53,11 @@ define ptr @foo_with_removable_malloc() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -79,7 +79,7 @@ define ptr @foo_with_removable_malloc() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
@@ -87,7 +87,7 @@ define ptr @foo_with_removable_malloc_free() {
; CHECK-LABEL: define ptr @foo_with_removable_malloc_free() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
; CHECK-NEXT: [[M1:%.*]] = tail call ptr @malloc(i64 4)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[M2:%.*]] = tail call ptr @malloc(i64 4)
@@ -99,12 +99,12 @@ define ptr @foo_with_removable_malloc_free() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
%m1 = tail call ptr @malloc(i64 4)
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
@@ -126,14 +126,14 @@ define ptr @foo_with_removable_malloc_free() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
ret ptr %ret
}
define ptr @foo_with_malloc_to_calloc() {
; CHECK-LABEL: define ptr @foo_with_malloc_to_calloc() {
; CHECK-NEXT: [[STRUCT_ALLOCA:%.*]] = alloca [[STRUCT_TYPE:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: [[STRUCT_BYTE_8:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 8
; CHECK-NEXT: [[STRUCT_BYTE_4:%.*]] = getelementptr inbounds i8, ptr [[STRUCT_ALLOCA]], i64 4
; CHECK-NEXT: [[CALLOC1:%.*]] = call ptr @calloc(i64 1, i64 4)
@@ -144,13 +144,13 @@ define ptr @foo_with_malloc_to_calloc() {
; CHECK-NEXT: [[RET:%.*]] = load ptr, ptr [[STRUCT_BYTE_8]], align 8
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_4]])
; CHECK-NEXT: call void @readnone(ptr [[STRUCT_BYTE_8]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 56, ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[STRUCT_ALLOCA]]) #[[ATTR6]]
; CHECK-NEXT: call void @use(ptr [[CALLOC1]])
; CHECK-NEXT: call void @use(ptr [[CALLOC]])
; CHECK-NEXT: ret ptr [[RET]]
;
%struct.alloca = alloca %struct.type, align 8
- call void @llvm.lifetime.start.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %struct.alloca) nounwind
%struct.byte.8 = getelementptr inbounds i8, ptr %struct.alloca, i64 8
%struct.byte.4 = getelementptr inbounds i8, ptr %struct.alloca, i64 4
@@ -172,15 +172,15 @@ define ptr @foo_with_malloc_to_calloc() {
%ret = load ptr, ptr %struct.byte.8
call void @readnone(ptr %struct.byte.4);
call void @readnone(ptr %struct.byte.8);
- call void @llvm.lifetime.end.p0(i64 56, ptr nonnull %struct.alloca) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %struct.alloca) nounwind
call void @use(ptr %m1)
call void @use(ptr %m2)
ret ptr %ret
}
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare noalias ptr @malloc(i64) willreturn allockind("alloc,uninitialized") "alloc-family"="malloc"
declare void @readnone(ptr) readnone nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
index 7d827fa..56c84c7 100644
--- a/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/captures-before-load.ll
@@ -865,7 +865,7 @@ exit:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
declare void @use.i64(i64)
@@ -883,7 +883,7 @@ define i64 @test_a_not_captured_at_all(ptr %ptr, ptr %ptr.2, i1 %c) {
; CHECK-NEXT: call void @use.i64(i64 [[LV_2]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[A]], i8 0, i64 8, i1 false)
; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[A]], align 4
@@ -902,7 +902,7 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.start.p0(i64 8, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i64 99, ptr %a
call void @clobber()
call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 8, i1 false)
@@ -1112,7 +1112,7 @@ else:
declare void @capture_and_clobber_multiple(ptr, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i64 @earliest_escape_ptrtoint(ptr %p.1) {
; CHECK-LABEL: @earliest_escape_ptrtoint(
@@ -1122,7 +1122,7 @@ define i64 @earliest_escape_ptrtoint(ptr %p.1) {
; CHECK-NEXT: [[LV_1:%.*]] = load ptr, ptr [[P_1:%.*]], align 8
; CHECK-NEXT: [[LV_2:%.*]] = load i64, ptr [[LV_1]], align 4
; CHECK-NEXT: store ptr [[A_1]], ptr [[P_1]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_2]])
; CHECK-NEXT: ret i64 [[LV_2]]
;
entry:
@@ -1134,7 +1134,7 @@ entry:
store ptr %a.1, ptr %p.1, align 8
%int = ptrtoint ptr %a.2 to i64
store i64 %int , ptr %a.2, align 8
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
ret i64 %lv.2
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/dominate.ll b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
index 262d16e..7e3ddb3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/dominate.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/dominate.ll
@@ -8,12 +8,12 @@ bb1:
br label %bb3
bb2:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
br label %bb3
bb3:
call void @bar()
- call void @llvm.lifetime.end.p0(i64 -1, ptr %memtmp3.i)
+ call void @llvm.lifetime.end.p0(ptr %memtmp3.i)
br label %bb4
bb4:
@@ -21,4 +21,4 @@ bb4:
}
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
index 27ad639..8225e14 100644
--- a/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/libcalls.ll
@@ -56,14 +56,14 @@ define void @test3(ptr %src) {
define void @test_strcat_with_lifetime(ptr %src) {
; CHECK-LABEL: @test_strcat_with_lifetime(
; CHECK-NEXT: [[B:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[B]])
; CHECK-NEXT: ret void
;
%B = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.start.p0(ptr nonnull %B)
%call = call ptr @strcat(ptr %B, ptr %src)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %B)
+ call void @llvm.lifetime.end.p0(ptr nonnull %B)
ret void
}
@@ -344,61 +344,61 @@ entry:
define void @dse_strcpy(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strcpy(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strcpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strncpy(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strncpy(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strncpy(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strcat(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strcat(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strcat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
define void @dse_strncat(ptr nocapture readonly %src) {
; CHECK-LABEL: @dse_strncat(
; CHECK-NEXT: [[A:%.*]] = alloca [256 x i8], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 256, ptr nonnull [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 256, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
%a = alloca [256 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call ptr @strncat(ptr nonnull %a, ptr nonnull dereferenceable(1) %src, i64 6)
- call void @llvm.lifetime.end.p0(i64 256, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
index f2a372ea..3d74c84 100644
--- a/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/lifetime.ll
@@ -3,20 +3,20 @@
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memset.p0.i8(ptr nocapture, i8, i8, i1) nounwind
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%A = alloca i8
store i8 0, ptr %A ;; Written to by memset
- call void @llvm.lifetime.end.p0(i64 1, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.memset.p0.i8(ptr %A, i8 0, i8 -1, i1 false)
@@ -26,14 +26,14 @@ define void @test1() {
define void @test2(ptr %P) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[Q:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[Q]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[Q]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Q]])
; CHECK-NEXT: ret void
;
%Q = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.start.p0(ptr %Q)
store i32 0, ptr %Q ;; This store is dead.
- call void @llvm.lifetime.end.p0(i64 4, ptr %Q)
+ call void @llvm.lifetime.end.p0(ptr %Q)
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
index 7dd8e41..264e816 100644
--- a/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/memcpy-lifetimes.ll
@@ -15,7 +15,7 @@ define ptr @alloc_tree() {
; CHECK-LABEL: @alloc_tree(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FVAL:%.*]] = alloca [4 x ptr], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[FVAL]])
; CHECK-NEXT: [[CALL:%.*]] = tail call dereferenceable_or_null(192) ptr @malloc(i64 192)
; CHECK-NEXT: [[CALL3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x ptr], ptr [[FVAL]], i64 0, i64 3
@@ -29,12 +29,12 @@ define ptr @alloc_tree() {
; CHECK-NEXT: [[CALL3_3:%.*]] = tail call ptr @alloc(ptr [[CALL]])
; CHECK-NEXT: store ptr [[CALL3_3]], ptr [[FVAL]], align 16
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) [[CALL]], ptr nonnull align 16 dereferenceable(32) [[FVAL]], i64 32, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[FVAL]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[FVAL]])
; CHECK-NEXT: ret ptr [[CALL]]
;
entry:
%fval = alloca [4 x ptr], align 16
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %fval) #7
+ call void @llvm.lifetime.start.p0(ptr nonnull %fval) #7
%call = tail call dereferenceable_or_null(192) ptr @malloc(i64 192) #8
%call3 = tail call ptr @alloc(ptr %call)
%arrayidx = getelementptr inbounds [4 x ptr], ptr %fval, i64 0, i64 3
@@ -48,11 +48,11 @@ entry:
%call3.3 = tail call ptr @alloc(ptr %call)
store ptr %call3.3, ptr %fval, align 16
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(32) %call, ptr nonnull align 16 dereferenceable(32) %fval, i64 32, i1 false)
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %fval) #7
+ call void @llvm.lifetime.end.p0(ptr nonnull %fval) #7
ret ptr %call
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare noalias ptr @malloc(i64)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
index f3f5cb1..112e9f4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-loop-carried-dependence.ll
@@ -103,7 +103,7 @@ define void @test.2() {
; CHECK-NEXT: [[C_2:%.*]] = icmp slt i64 [[IV_2_NEXT]], 100
; CHECK-NEXT: br i1 [[C_2]], label [[LOOP_2]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 400, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -136,11 +136,11 @@ loop.2:
br i1 %c.2, label %loop.2, label %exit
exit:
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %A) #5
+ call void @llvm.lifetime.end.p0(ptr nonnull %A) #5
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Make sure `store i32 10, ptr %ptr.2` in %cond.store is not removed. The
; stored value may be read by `%use = load i32, ptr %ptr.1` in a future
@@ -171,7 +171,7 @@ define void@test.3() {
; CHECK-NEXT: [[DEPTH_1_BE]] = phi i32 [ [[SUB]], [[COND_READ]] ], [ [[INC]], [[COND_STORE]] ]
; CHECK-NEXT: br label [[LOOP_HEADER]]
; CHECK: cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr nonnull [[NODESTACK]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[NODESTACK]])
; CHECK-NEXT: ret void
;
entry:
@@ -203,7 +203,7 @@ loop.latch:
br label %loop.header
cleanup: ; preds = %while.body, %while.end, %entry
- call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %nodeStack) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %nodeStack) #3
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
index d32d562..8ecc7939 100644
--- a/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/multiblock-malloc-free.ll
@@ -4,8 +4,8 @@
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
declare void @unknown_func()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i32, i1) nounwind
diff --git a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
index 3712bec..7293228 100644
--- a/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/nounwind-invoke.ll
@@ -9,7 +9,7 @@ define void @test_nounwind_invoke() personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: invoke void @foo(ptr [[TMP]])
; CHECK-NEXT: to label [[BB1:%.*]] unwind label [[BB2:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]])
; CHECK-NEXT: ret void
; CHECK: bb2:
; CHECK-NEXT: [[ABCTMP1:%.*]] = landingpad { ptr, i32 }
@@ -26,7 +26,7 @@ bb:
to label %bb1 unwind label %bb2
bb1: ; preds = %bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
ret void
bb2: ; preds = %bb
@@ -36,7 +36,7 @@ bb2: ; preds = %bb
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind willreturn
declare void @foo(ptr) #1
declare i32 @__gxx_personality_v0(...)
diff --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll
index 6c04e15..9d28395 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll
@@ -697,26 +697,26 @@ define void @test39_atomic(ptr %P, ptr %Q, ptr %R) {
declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i32)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
define void @test40(ptr noalias %Pp, ptr noalias %Q) {
; CHECK-LABEL: @test40(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: [[PC:%.*]] = load ptr, ptr [[PP:%.*]], align 8
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 [[A]], ptr align 4 [[Q:%.*]], i64 4, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[PC]], ptr nonnull align 4 [[A]], i64 4, i1 true)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry:
%A = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %A)
+ call void @llvm.lifetime.start.p0(ptr nonnull %A)
%Pc = load ptr, ptr %Pp, align 8
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 4 %A, ptr align 4 %Q, i64 4, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %Pc, ptr nonnull align 4 %A, i64 4, i1 true)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %A)
+ call void @llvm.lifetime.end.p0(ptr nonnull %A)
ret void
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
index df2feb0..0970ed3 100644
--- a/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/trivial-dse-calls.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=dse -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @unknown()
declare void @f(ptr)
@@ -23,14 +23,14 @@ define void @test_dead() {
define void @test_lifetime() {
; CHECK-LABEL: @test_lifetime(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -39,18 +39,18 @@ define void @test_lifetime() {
define void @test_lifetime2() {
; CHECK-LABEL: @test_lifetime2(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: call void @unknown()
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @unknown()
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
call void @unknown()
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
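(For reference — a minimal illustrative sketch, not taken from any single test above: after this change the lifetime intrinsics carry no i64 size operand, so the marked region is the whole alloca rather than an explicitly sized prefix.)

; illustrative only; @example and %buf are made-up names
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)

define void @example() {
  %buf = alloca i32, align 4
  call void @llvm.lifetime.start.p0(ptr %buf)  ; was: (i64 4, ptr %buf)
  store i32 0, ptr %buf
  call void @llvm.lifetime.end.p0(ptr %buf)    ; was: (i64 4, ptr %buf)
  ret void
}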
diff --git a/llvm/test/Transforms/EarlyCSE/memoryssa.ll b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
index ba4cce4..f7f7ba3 100644
--- a/llvm/test/Transforms/EarlyCSE/memoryssa.ll
+++ b/llvm/test/Transforms/EarlyCSE/memoryssa.ll
@@ -146,12 +146,12 @@ define void @test_writeback_lifetimes() {
; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes(
; CHECK-NOMEMSSA-NEXT: entry:
; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1
; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NOMEMSSA-NEXT: ret void
@@ -159,24 +159,24 @@ define void @test_writeback_lifetimes() {
; CHECK-LABEL: @test_writeback_lifetimes(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: [[Q:%.*]] = getelementptr i32, ptr [[P]], i64 1
; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NEXT: ret void
;
entry:
%p = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
%q = getelementptr i32, ptr %p, i64 1
%pv = load i32, ptr %p
%qv = load i32, ptr %q
- call void @llvm.lifetime.end.p0(i64 8, ptr %p)
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i32 %pv, ptr %p
store i32 %qv, ptr %q
ret void
@@ -188,11 +188,11 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) {
; CHECK-NOMEMSSA-LABEL: @test_writeback_lifetimes_multi_arg(
; CHECK-NOMEMSSA-NEXT: entry:
; CHECK-NOMEMSSA-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NOMEMSSA-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NOMEMSSA-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NOMEMSSA-NEXT: store i32 [[QV]], ptr [[Q]], align 4
; CHECK-NOMEMSSA-NEXT: ret void
@@ -200,25 +200,25 @@ define void @test_writeback_lifetimes_multi_arg(ptr %q) {
; CHECK-LABEL: @test_writeback_lifetimes_multi_arg(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: [[PV:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT: [[QV:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[P]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i32 [[PV]], ptr [[P]], align 4
; CHECK-NEXT: ret void
;
entry:
%p = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
%pv = load i32, ptr %p
%qv = load i32, ptr %q
- call void @llvm.lifetime.end.p0(i64 8, ptr %p)
- call void @llvm.lifetime.start.p0(i64 8, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i32 %pv, ptr %p
store i32 %qv, ptr %q
ret void
}
-declare void @llvm.lifetime.end.p0(i64, ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
+declare void @llvm.lifetime.start.p0(ptr)
diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
index f7e21cd..736b072 100644
--- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
+++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-linkage.ll
@@ -25,11 +25,11 @@ define void @defn_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_simple(ptr %varargs) {
@@ -50,11 +50,11 @@ define private void @defn_private_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_private_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_private_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_private_simple(ptr %varargs) {
@@ -75,11 +75,11 @@ define internal void @defn_internal_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_internal_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_internal_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_internal_simple(ptr %varargs) {
@@ -211,11 +211,11 @@ define external void @defn_external_simple(...) {
; OPT-LABEL: define {{[^@]+}}@defn_external_simple(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: call void @defn_external_simple.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret void
;
; ABI-LABEL: define {{[^@]+}}@defn_external_simple(ptr %varargs) {
diff --git a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
index 96cc826..e21b72d 100644
--- a/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
+++ b/llvm/test/Transforms/ExpandVariadics/expand-va-intrinsic-split-simple.ll
@@ -10,11 +10,11 @@ define i32 @variadic_int_double_get_firstz(...) {
; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: %1 = call i32 @variadic_int_double_get_firstz.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret i32 %1
;
; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_firstz(ptr %varargs) {
@@ -61,11 +61,11 @@ define double @variadic_int_double_get_secondz(...) {
; OPT-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(...) {
; OPT-NEXT: entry:
; OPT-NEXT: %va_start = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %va_start)
; OPT-NEXT: call void @llvm.va_start.p0(ptr %va_start)
; OPT-NEXT: %0 = load ptr, ptr %va_start, align 4
; OPT-NEXT: %1 = call double @variadic_int_double_get_secondz.valist(ptr %0)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %va_start)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %va_start)
; OPT-NEXT: ret double %1
;
; ABI-LABEL: define {{[^@]+}}@variadic_int_double_get_secondz(ptr %varargs) {
@@ -115,13 +115,13 @@ entry:
; CHECK-LABEL: @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr %1, align 4
; CHECK-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: %cmp.i = icmp eq i32 %call, %x
; CHECK-NEXT: ret i1 %cmp.i
; CHECK-NEXT: }
@@ -130,26 +130,26 @@ define zeroext i1 @variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; OPT-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; OPT-NEXT: entry:
; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; OPT-NEXT: store i32 %x, ptr %0, align 4
; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; OPT-NEXT: store double %y, ptr %1, align 8
; OPT-NEXT: %call = call i32 @variadic_int_double_get_firstz.valist(ptr %vararg_buffer)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; OPT-NEXT: %cmp.i = icmp eq i32 %call, %x
; OPT-NEXT: ret i1 %cmp.i
;
; ABI-LABEL: define {{[^@]+}}@variadic_can_get_firstIidEEbT_T0_(i32 %x, double %y) {
; ABI-NEXT: entry:
; ABI-NEXT: %vararg_buffer = alloca %variadic_can_get_firstIidEEbT_T0_.vararg, align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; ABI-NEXT: store i32 %x, ptr %0, align 4
; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_firstIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; ABI-NEXT: store double %y, ptr %1, align 8
; ABI-NEXT: %call = call i32 @variadic_int_double_get_firstz(ptr %vararg_buffer)
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; ABI-NEXT: %cmp.i = icmp eq i32 %call, %x
; ABI-NEXT: ret i1 %cmp.i
;
@@ -162,13 +162,13 @@ entry:
; CHECK-LABEL: @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; CHECK-NEXT: %0 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; CHECK-NEXT: store i32 %x, ptr %0, align 4
; CHECK-NEXT: %1 = getelementptr inbounds %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 1
; CHECK-NEXT: store double %y, ptr %1, align 4
; CHECK-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr %vararg_buffer)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; CHECK-NEXT: %cmp.i = fcmp oeq double %call, %y
; CHECK-NEXT: ret i1 %cmp.i
; CHECK-NEXT: }
@@ -177,26 +177,26 @@ define zeroext i1 @variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; OPT-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; OPT-NEXT: entry:
; OPT-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; OPT-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; OPT-NEXT: store i32 %x, ptr %0, align 4
; OPT-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; OPT-NEXT: store double %y, ptr %1, align 8
; OPT-NEXT: %call = call double @variadic_int_double_get_secondz.valist(ptr %vararg_buffer)
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; OPT-NEXT: %cmp.i = fcmp oeq double %call, %y
; OPT-NEXT: ret i1 %cmp.i
;
; ABI-LABEL: define {{[^@]+}}@variadic_can_get_secondIidEEbT_T0_(i32 %x, double %y) {
; ABI-NEXT: entry:
; ABI-NEXT: %vararg_buffer = alloca %variadic_can_get_secondIidEEbT_T0_.vararg, align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr %vararg_buffer)
; ABI-NEXT: %0 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 0
; ABI-NEXT: store i32 %x, ptr %0, align 4
; ABI-NEXT: %1 = getelementptr inbounds nuw %variadic_can_get_secondIidEEbT_T0_.vararg, ptr %vararg_buffer, i32 0, i32 2
; ABI-NEXT: store double %y, ptr %1, align 8
; ABI-NEXT: %call = call double @variadic_int_double_get_secondz(ptr %vararg_buffer)
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr %vararg_buffer)
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr %vararg_buffer)
; ABI-NEXT: %cmp.i = fcmp oeq double %call, %y
; ABI-NEXT: ret i1 %cmp.i
;
diff --git a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
index b661f7f..0f178c7 100644
--- a/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
+++ b/llvm/test/Transforms/ExpandVariadics/indirect-calls.ll
@@ -19,11 +19,11 @@ define hidden void @fptr_single_i32(i32 noundef %x) {
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_SINGLE_I32_VARARG:%.*]], align 16
; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_SINGLE_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -45,11 +45,11 @@ define hidden void @fptr_libcS(ptr noundef byval(%struct.libcS) align 8 %x) {
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[FPTR_LIBCS_VARARG:%.*]], align 16
; ABI-NEXT: [[TMP0:%.*]] = load volatile ptr, ptr @vararg_ptr, align 4
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[FPTR_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void [[TMP0]](ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
index 1782c92..52ce80e 100644
--- a/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
+++ b/llvm/test/Transforms/ExpandVariadics/intrinsics.ll
@@ -3,13 +3,13 @@
; RUN: opt -mtriple=wasm32-unknown-unknown -S --passes=expand-variadics --expand-variadics-override=lowering < %s | FileCheck %s -check-prefixes=CHECK,ABI
; REQUIRES: webassembly-registered-target
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.va_copy.p0(ptr, ptr)
declare void @valist(ptr noundef)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.va_start.p0(ptr)
@@ -20,31 +20,31 @@ define void @start_once(...) {
; OPT-LABEL: @start_once(
; OPT-NEXT: entry:
; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]])
; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]])
; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4
; OPT-NEXT: call void @start_once.valist(ptr [[TMP0]])
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]])
; OPT-NEXT: ret void
;
; ABI-LABEL: @start_once(
; ABI-NEXT: entry:
; ABI-NEXT: [[S:%.*]] = alloca ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S]], align 4
; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP0]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]])
; ABI-NEXT: ret void
;
entry:
%s = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
call void @llvm.va_start.p0(ptr nonnull %s)
%0 = load ptr, ptr %s, align 4
call void @valist(ptr noundef %0)
call void @llvm.va_end.p0(ptr %s)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
ret void
}
@@ -53,34 +53,34 @@ define void @start_twice(...) {
; OPT-LABEL: @start_twice(
; OPT-NEXT: entry:
; OPT-NEXT: [[VA_START:%.*]] = alloca ptr, align 4
-; OPT-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.start.p0(ptr [[VA_START]])
; OPT-NEXT: call void @llvm.va_start.p0(ptr [[VA_START]])
; OPT-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VA_START]], align 4
; OPT-NEXT: call void @start_twice.valist(ptr [[TMP0]])
-; OPT-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VA_START]])
+; OPT-NEXT: call void @llvm.lifetime.end.p0(ptr [[VA_START]])
; OPT-NEXT: ret void
;
; ABI-LABEL: @start_twice(
; ABI-NEXT: entry:
; ABI-NEXT: [[S0:%.*]] = alloca ptr, align 4
; ABI-NEXT: [[S1:%.*]] = alloca ptr, align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S0]])
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[S1]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S0]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S1]])
; ABI-NEXT: store ptr [[VARARGS:%.*]], ptr [[S0]], align 4
; ABI-NEXT: [[TMP0:%.*]] = load ptr, ptr [[S0]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP0]])
; ABI-NEXT: store ptr [[VARARGS]], ptr [[S1]], align 4
; ABI-NEXT: [[TMP1:%.*]] = load ptr, ptr [[S1]], align 4
; ABI-NEXT: call void @valist(ptr noundef [[TMP1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[S0]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S0]])
; ABI-NEXT: ret void
;
entry:
%s0 = alloca ptr, align 4
%s1 = alloca ptr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s0)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %s1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s1)
call void @llvm.va_start.p0(ptr nonnull %s0)
%0 = load ptr, ptr %s0, align 4
call void @valist(ptr noundef %0)
@@ -89,8 +89,8 @@ entry:
%1 = load ptr, ptr %s1, align 4
call void @valist(ptr noundef %1)
call void @llvm.va_end.p0(ptr %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %s0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s0)
ret void
}
@@ -100,21 +100,21 @@ define void @copy(ptr noundef %va) {
; CHECK-NEXT: [[VA_ADDR:%.*]] = alloca ptr, align 4
; CHECK-NEXT: [[CP:%.*]] = alloca ptr, align 4
; CHECK-NEXT: store ptr [[VA:%.*]], ptr [[VA_ADDR]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[CP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[CP]], ptr [[VA_ADDR]], i32 4, i1 false)
; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[CP]], align 4
; CHECK-NEXT: call void @valist(ptr noundef [[TMP0]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[CP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CP]])
; CHECK-NEXT: ret void
;
entry:
%va.addr = alloca ptr, align 4
%cp = alloca ptr, align 4
store ptr %va, ptr %va.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %cp)
call void @llvm.va_copy.p0(ptr nonnull %cp, ptr nonnull %va.addr)
%0 = load ptr, ptr %cp, align 4
call void @valist(ptr noundef %0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %cp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %cp)
ret void
}
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
index a9f27f7..83b33b93 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-byval-byref.ll
@@ -16,11 +16,11 @@ define void @pass_byval(ptr byval(i32) %b) {
; ABI-LABEL: @pass_byval(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYVAL_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[TMP0]], ptr [[B:%.*]], i64 4, i1 false)
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -41,13 +41,13 @@ define void @i32_libcS_byval(i32 %x, ptr noundef byval(%struct.libcS) align 8 %y
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYVAL_VARARG:%.*]], align 16
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[Y:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -66,13 +66,13 @@ define void @libcS_i32_byval(ptr byval(%struct.libcS) align 8 %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYVAL_VARARG:%.*]], align 16
; ABI-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[INDIRECTALLOCA]], ptr [[X:%.*]], i64 24, i1 false)
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYVAL_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -90,11 +90,11 @@ define void @pass_byref(ptr byref(i32) %b) {
; ABI-LABEL: @pass_byref(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_BYREF_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[B:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -113,13 +113,13 @@ define void @i32_libcS_byref(i32 %x, ptr noundef byref(%struct.libcS) align 8 %y
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_BYREF_VARARG:%.*]], align 16
; ABI-NEXT: store ptr [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -138,13 +138,13 @@ define void @libcS_i32_byref(ptr byref(%struct.libcS) align 8 %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_BYREF_VARARG:%.*]], align 16
; ABI-NEXT: store ptr [[X:%.*]], ptr [[INDIRECTALLOCA]], align 4
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_BYREF_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
index 67cb269..46e1904 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-indirect.ll
@@ -19,13 +19,13 @@ define void @i32_libcS(i32 %x, %struct.libcS %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[I32_LIBCS_VARARG:%.*]], align 16
; ABI-NEXT: store [[STRUCT_LIBCS]] [[Y:%.*]], ptr [[INDIRECTALLOCA]], align 8
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[I32_LIBCS_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -44,13 +44,13 @@ define void @libcS_i32(%struct.libcS %x, i32 %y) {
; ABI-NEXT: [[INDIRECTALLOCA:%.*]] = alloca [[STRUCT_LIBCS:%.*]], align 8
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[LIBCS_I32_VARARG:%.*]], align 16
; ABI-NEXT: store [[STRUCT_LIBCS]] [[X:%.*]], ptr [[INDIRECTALLOCA]], align 8
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store ptr [[INDIRECTALLOCA]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[LIBCS_I32_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[Y:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
index 7a0c004..cf52724 100644
--- a/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
+++ b/llvm/test/Transforms/ExpandVariadics/pass-integers.ll
@@ -17,9 +17,9 @@ define void @pass_nothing() {
; ABI-LABEL: @pass_nothing(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_NOTHING_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -36,11 +36,11 @@ define void @pass_s1(i8 %x) {
; ABI-LABEL: @pass_s1(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S1_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP0]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -57,11 +57,11 @@ define void @pass_s2(i16 %x) {
; ABI-LABEL: @pass_s2(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S2_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP0]], align 2
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -78,11 +78,11 @@ define void @pass_s3(i32 %x) {
; ABI-LABEL: @pass_s3(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S3_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -99,11 +99,11 @@ define void @pass_s4(i64 %x) {
; ABI-LABEL: @pass_s4(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S4_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP0]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -120,11 +120,11 @@ define void @pass_s5(<4 x i32> %x) {
; ABI-LABEL: @pass_s5(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_S5_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP0]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -141,13 +141,13 @@ define void @pass_int_s1(i32 %i, i8 %x) {
; ABI-LABEL: @pass_int_s1(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S1_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 5, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S1_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i8 [[X:%.*]], ptr [[TMP1]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 5, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -164,13 +164,13 @@ define void @pass_int_s2(i32 %i, i16 %x) {
; ABI-LABEL: @pass_int_s2(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S2_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 6, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S2_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i16 [[X:%.*]], ptr [[TMP1]], align 2
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 6, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -187,13 +187,13 @@ define void @pass_int_s3(i32 %i, i32 %x) {
; ABI-LABEL: @pass_int_s3(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S3_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S3_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
; ABI-NEXT: store i32 [[X:%.*]], ptr [[TMP1]], align 4
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -210,13 +210,13 @@ define void @pass_int_s4(i32 %i, i64 %x) {
; ABI-LABEL: @pass_int_s4(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S4_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S4_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
; ABI-NEXT: store i64 [[X:%.*]], ptr [[TMP1]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -233,13 +233,13 @@ define void @pass_int_s5(i32 %i, <4 x i32> %x) {
; ABI-LABEL: @pass_int_s5(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_INT_S5_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_INT_S5_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
; ABI-NEXT: store <4 x i32> [[X:%.*]], ptr [[TMP1]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -256,7 +256,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) {
; ABI-LABEL: @pass_asc(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_ASC_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 48, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i8 [[X1:%.*]], ptr [[TMP0]], align 1
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 2
@@ -268,7 +268,7 @@ define void @pass_asc(i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32> %x5) {
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_ASC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 8
; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP4]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 48, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -285,7 +285,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) {
; ABI-LABEL: @pass_dsc(
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_DSC_VARARG:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 33, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store <4 x i32> [[X0:%.*]], ptr [[TMP0]], align 16
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -297,7 +297,7 @@ define void @pass_dsc(<4 x i32> %x0, i64 %x1, i32 %x2, i16 %x3, i8 %x4) {
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_DSC_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 5
; ABI-NEXT: store i8 [[X4:%.*]], ptr [[TMP4]], align 1
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 33, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: ret void
;
entry:
@@ -316,7 +316,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: entry:
; ABI-NEXT: [[VARARG_BUFFER:%.*]] = alloca [[PASS_MULTIPLE_VARARG:%.*]], align 16
; ABI-NEXT: [[VARARG_BUFFER1:%.*]] = alloca [[PASS_MULTIPLE_VARARG_0:%.*]], align 16
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER]])
; ABI-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 0
; ABI-NEXT: store i32 [[I:%.*]], ptr [[TMP0]], align 4
; ABI-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 1
@@ -324,8 +324,8 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG]], ptr [[VARARG_BUFFER]], i32 0, i32 3
; ABI-NEXT: store i64 [[X4:%.*]], ptr [[TMP2]], align 8
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[VARARG_BUFFER]])
-; ABI-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER]])
+; ABI-NEXT: call void @llvm.lifetime.start.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 0
; ABI-NEXT: store i32 [[I]], ptr [[TMP3]], align 4
; ABI-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 1
@@ -335,7 +335,7 @@ define void @pass_multiple(i32 %i, i8 %x1, i16 %x2, i32 %x3, i64 %x4, <4 x i32>
; ABI-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[PASS_MULTIPLE_VARARG_0]], ptr [[VARARG_BUFFER1]], i32 0, i32 5
; ABI-NEXT: store <4 x i32> [[X5:%.*]], ptr [[TMP6]], align 16
; ABI-NEXT: call void @sink(ptr [[VARARG_BUFFER1]])
-; ABI-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[VARARG_BUFFER1]])
+; ABI-NEXT: call void @llvm.lifetime.end.p0(ptr [[VARARG_BUFFER1]])
; ABI-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 4202467..ff80328 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -17,7 +17,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv) #1
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -86,7 +86,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -105,14 +105,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
diff --git a/llvm/test/Transforms/GVN/lifetime-simple.ll b/llvm/test/Transforms/GVN/lifetime-simple.ll
index 89ca127..bd35052 100644
--- a/llvm/test/Transforms/GVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/GVN/lifetime-simple.ll
@@ -6,18 +6,18 @@ define i8 @test() nounwind {
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i8 1, ptr [[P]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[TMP0]]
;
entry:
%P = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
%0 = load i8, ptr %P
store i8 1, ptr %P
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
%1 = load i8, ptr %P
ret i8 %1
}
@@ -28,17 +28,17 @@ define void @assume_eq_arg(ptr %arg) {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[ALLOCA]], [[ARG]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr [[ALLOCA]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32
%cmp = icmp eq ptr %alloca, %arg
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store volatile i32 0, ptr %alloca
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
@@ -47,17 +47,17 @@ define void @assume_eq_null() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(1)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
;
%alloca = alloca i32, addrspace(1)
%cmp = icmp eq ptr addrspace(1) %alloca, null
call void @llvm.assume(i1 %cmp)
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
}
@@ -67,9 +67,9 @@ define void @dom_eq_null() {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[ALLOCA]], null
; CHECK-NEXT: br i1 [[CMP]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: store volatile i32 0, ptr addrspace(1) null, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) [[ALLOCA]])
; CHECK-NEXT: ret void
; CHECK: [[ELSE]]:
; CHECK-NEXT: ret void
@@ -79,14 +79,14 @@ define void @dom_eq_null() {
br i1 %cmp, label %if, label %else
if:
- call void @llvm.lifetime.start.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) %alloca)
store volatile i32 0, ptr addrspace(1) %alloca
- call void @llvm.lifetime.end.p1(i64 4, ptr addrspace(1) %alloca)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) %alloca)
ret void
else:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
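(As the addrspace(1) cases above illustrate, the intrinsic stays overloaded on the pointer's address space — .p0, .p1, and so on — and only the size operand is dropped. A hypothetical minimal form for address space 1:)

; illustrative only
%a = alloca i32, addrspace(1)
call void @llvm.lifetime.start.p1(ptr addrspace(1) %a)
call void @llvm.lifetime.end.p1(ptr addrspace(1) %a)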
diff --git a/llvm/test/Transforms/GVN/opt-remarks.ll b/llvm/test/Transforms/GVN/opt-remarks.ll
index 87cd54d..a5c3cb5c 100644
--- a/llvm/test/Transforms/GVN/opt-remarks.ll
+++ b/llvm/test/Transforms/GVN/opt-remarks.ll
@@ -109,9 +109,9 @@ entry:
define i8 @lifetime_end(i8 %val) {
%p = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.start.p0(ptr %p)
store i8 %val, ptr %p
- call void @llvm.lifetime.end.p0(i64 32, ptr %p)
+ call void @llvm.lifetime.end.p0(ptr %p)
%1 = load i8, ptr %p
ret i8 %1
}
diff --git a/llvm/test/Transforms/GVN/vscale.ll b/llvm/test/Transforms/GVN/vscale.ll
index 5d6c559..b358df5 100644
--- a/llvm/test/Transforms/GVN/vscale.ll
+++ b/llvm/test/Transforms/GVN/vscale.ll
@@ -696,7 +696,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-LABEL: @bigexample(
; MDEP-NEXT: entry:
; MDEP-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MDEP-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MDEP-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MDEP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -720,13 +720,13 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MDEP-NEXT: [[TMP9:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]], <vscale x 16 x i8> [[TMP8]], 2
; MDEP-NEXT: [[TMP10:%.*]] = bitcast <vscale x 4 x i32> [[A_ELT6]] to <vscale x 16 x i8>
; MDEP-NEXT: [[TMP11:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[TMP10]], 3
-; MDEP-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MDEP-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MDEP-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP11]]
;
; MSSA-LABEL: @bigexample(
; MSSA-NEXT: entry:
; MSSA-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; MSSA-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; MSSA-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; MSSA-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -750,12 +750,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; MSSA-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP5]], <vscale x 16 x i8> [[DOTUNPACK10]], 2
; MSSA-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16
; MSSA-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP6]], <vscale x 16 x i8> [[DOTUNPACK12]], 3
-; MSSA-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; MSSA-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; MSSA-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP7]]
;
entry:
%ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
%a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0
store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16
%0 = call i64 @llvm.vscale.i64()
@@ -790,7 +790,7 @@ entry:
%.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14
%.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16
%15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15
}
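Taken together, the hunks above illustrate the whole migration: the explicit size operand of the lifetime intrinsics, whether a concrete byte count or i64 -1 for a whole-object extent as in the vscale test, is dropped, and the marker is now understood to cover the entire alloca named by its pointer operand. A minimal before/after sketch in the new form (the function @sketch and its 16-byte buffer are illustrative, not taken from any test in this patch):

define void @sketch() {
entry:
  %buf = alloca [16 x i8], align 1
  ; was: call void @llvm.lifetime.start.p0(i64 16, ptr %buf)
  call void @llvm.lifetime.start.p0(ptr %buf)
  store i8 0, ptr %buf
  ; was: call void @llvm.lifetime.end.p0(i64 16, ptr %buf)
  call void @llvm.lifetime.end.p0(ptr %buf)
  ret void
}

declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)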
diff --git a/llvm/test/Transforms/GVNHoist/pr29034.ll b/llvm/test/Transforms/GVNHoist/pr29034.ll
index f5378ea..a5294c5 100644
--- a/llvm/test/Transforms/GVNHoist/pr29034.ll
+++ b/llvm/test/Transforms/GVNHoist/pr29034.ll
@@ -37,7 +37,7 @@
define void @music_task(ptr nocapture readnone %p) local_unnamed_addr {
entry:
%mapi = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %mapi)
+ call void @llvm.lifetime.start.p0(ptr %mapi)
store ptr null, ptr %mapi, align 8, !tbaa !1
%call = call i32 @music_decoder_init(ptr nonnull %mapi)
br label %while.cond
@@ -99,7 +99,7 @@ while.cond2.backedge: ; preds = %sw.default, %sw.bb1
br label %while.cond2
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @music_decoder_init(ptr)
declare i32 @music_play_api(ptr, i32, i32, i32, ptr)
declare i32 @printf(ptr nocapture readonly, ...)
diff --git a/llvm/test/Transforms/GVNSink/lifetime.ll b/llvm/test/Transforms/GVNSink/lifetime.ll
index 1a8a69b..f8731e5 100644
--- a/llvm/test/Transforms/GVNSink/lifetime.ll
+++ b/llvm/test/Transforms/GVNSink/lifetime.ll
@@ -9,34 +9,34 @@ define void @test_cant_sink(i1 %c) {
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: br label %[[JOIN:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: store i64 1, ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: ret void
;
%a = alloca i8
%b = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
- call void @llvm.lifetime.start(i64 1, ptr %b)
+ call void @llvm.lifetime.start(ptr %a)
+ call void @llvm.lifetime.start(ptr %b)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %b
- call void @llvm.lifetime.end(i64 1, ptr %b)
+ call void @llvm.lifetime.end(ptr %b)
br label %join
join:
@@ -47,7 +47,7 @@ define void @test_can_sink(i1 %c) {
; CHECK-LABEL: define void @test_can_sink(
; CHECK-SAME: i1 [[C:%.*]]) {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[ELSE:.*]]
; CHECK: [[IF]]:
; CHECK-NEXT: br label %[[JOIN:.*]]
@@ -55,21 +55,21 @@ define void @test_can_sink(i1 %c) {
; CHECK-NEXT: br label %[[JOIN]]
; CHECK: [[JOIN]]:
; CHECK-NEXT: store i64 1, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
- call void @llvm.lifetime.start(i64 1, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
br i1 %c, label %if, label %else
if:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
else:
store i64 1, ptr %a
- call void @llvm.lifetime.end(i64 1, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
br label %join
join:
diff --git a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
index 9a8fbb8..7cb3a96 100644
--- a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
+++ b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll
@@ -24,17 +24,17 @@ entry:
define i16 @bar() local_unnamed_addr #1 {
entry:
%local2 = alloca [1 x i16], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %local2)
store ptr %local2, ptr @global, align 1
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %local2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %local2)
ret i16 undef
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind writeonly }
attributes #1 = { noinline nounwind writeonly }
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
index e5bab0c..28782d5 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-1.ll
@@ -1,8 +1,8 @@
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr)
@@ -18,17 +18,17 @@ entry:
normalPath:
; These two uses of stack slots are non-overlapping. Based on this alone,
; the stack slots could be merged.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call i1 @foo.cold.1(ptr %local1, ptr %local2)
; CHECK-NEXT: br i1
@@ -36,19 +36,19 @@ outlinedPath:
; These two uses of stack slots are overlapping. This should prevent
; merging of stack slots. CodeExtractor must replicate the effects of
; these markers in the caller to inhibit stack coloring.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @cold_use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br i1 undef, label %outlinedPath2, label %outlinedPathExit
outlinedPath2:
; These extra lifetime markers are used to test that we emit only one
; pair of guard markers in the caller per memory object.
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use(ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local2)
ret void
outlinedPathExit:
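As the comments in this test spell out, CodeExtractor must replicate the effect of overlapping lifetime markers in the caller so that stack coloring cannot merge the two slots, and it should emit only one pair of guard markers per memory object no matter how many marker pairs the outlined region contains. Reduced to a caller-side sketch under the new signature (@foo.cold.sketch and the i8 slots are illustrative names, not from this diff):

define void @caller_sketch() {
entry:
  %local1 = alloca i8
  %local2 = alloca i8
  br label %codeRepl
codeRepl:
  ; one guard pair per stack object, bracketing the outlined call
  call void @llvm.lifetime.start.p0(ptr %local1)
  call void @llvm.lifetime.start.p0(ptr %local2)
  call void @foo.cold.sketch(ptr %local1, ptr %local2)
  call void @llvm.lifetime.end.p0(ptr %local1)
  call void @llvm.lifetime.end.p0(ptr %local2)
  ret void
}

declare void @foo.cold.sketch(ptr, ptr)
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)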
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
index e42db78..da7a9b8 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-2.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=0 < %s 2>&1 | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @cold_use(ptr) cold
@@ -40,13 +40,13 @@ define void @only_lifetime_start_is_cold(i1 %arg) {
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[CODEREPL:%.*]], label [[NO_EXTRACT1:%.*]]
; CHECK: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @only_lifetime_start_is_cold.cold.1(ptr [[LOCAL1]], i1 [[ARG]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[NO_EXTRACT1]], label [[EXIT:%.*]]
; CHECK: no-extract1:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: ret void
;
entry:
@@ -55,7 +55,7 @@ entry:
extract1:
; lt.start
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
call void @cold_use(ptr %local1)
br i1 %arg, label %extract2, label %no-extract1
@@ -67,7 +67,7 @@ no-extract1:
exit:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
ret void
}
@@ -96,10 +96,10 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
; CHECK-LABEL: @only_lifetime_end_is_cold(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[NO_EXTRACT1:%.*]], label [[CODEREPL:%.*]]
; CHECK: no-extract1:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: codeRepl:
; CHECK-NEXT: call void @only_lifetime_end_is_cold.cold.1(ptr [[LOCAL1]]) #[[ATTR3]]
@@ -110,18 +110,18 @@ define void @only_lifetime_end_is_cold(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br i1 %arg, label %no-extract1, label %extract1
no-extract1:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
extract1:
; lt.end
call void @cold_use(ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
@@ -134,7 +134,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
; CHECK-LABEL: @do_not_lift_lifetime_end(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LOCAL1:%.*]] = alloca i256, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[LOCAL1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LOCAL1]])
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
; CHECK-NEXT: call void @use(ptr [[LOCAL1]])
@@ -148,7 +148,7 @@ define void @do_not_lift_lifetime_end(i1 %arg) {
entry:
; lt.start
%local1 = alloca i256
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local1)
br label %header
header:
@@ -167,7 +167,7 @@ extract2:
extract3:
; lt.end
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local1)
br label %exit
exit:
diff --git a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
index 26faaa3..b453c61 100644
--- a/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
+++ b/llvm/test/Transforms/HotColdSplit/lifetime-markers-on-inputs-3.ll
@@ -3,9 +3,9 @@
%type1 = type opaque
%type2 = type opaque
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @use(ptr, ptr)
@@ -23,16 +23,16 @@ normalPath:
ret void
; CHECK-LABEL: codeRepl:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local1)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr %local2)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local1)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %local2)
; CHECK-NEXT: call void @foo.cold.1(ptr %local1, ptr %local2
outlinedPath:
- call void @llvm.lifetime.start.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.start.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.start.p0(ptr %local1)
+ call void @llvm.lifetime.start.p0(ptr %local2)
call void @use2(ptr %local1, ptr %local2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local1)
- call void @llvm.lifetime.end.p0(i64 1, ptr %local2)
+ call void @llvm.lifetime.end.p0(ptr %local1)
+ call void @llvm.lifetime.end.p0(ptr %local2)
br label %outlinedPathExit
outlinedPathExit:
diff --git a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
index df7cb3c..80249fc 100644
--- a/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
+++ b/llvm/test/Transforms/HotColdSplit/sink-multiple-bitcasts-of-allocas-pr42451.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-apple-macosx10.14.0"
@c = common global i32 0, align 4
@h = common global i32 0, align 4
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1
declare ptr @m()
@@ -27,15 +27,15 @@ bb:
bb3: ; preds = %bb
%i4 = call ptr @m()
- call void @llvm.lifetime.start.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 2 %.sroa.4.i, i8 0, i64 20, i1 false)
call void @llvm.memset.p0.i64(ptr align 8 %.sroa.5.i, i8 0, i64 6, i1 false)
%i5 = load i32, ptr @c, align 4, !tbaa !4
%i6 = trunc i32 %i5 to i16
- call void @llvm.lifetime.end.p0(i64 20, ptr %.sroa.4.i)
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
- call void @llvm.lifetime.start.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.4.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
+ call void @llvm.lifetime.start.p0(ptr %.sroa.5.i)
call void @llvm.memset.p0.i64(ptr align 1 %.sroa.5.i, i8 3, i64 6, i1 false)
br label %bb7
@@ -47,7 +47,7 @@ bb7: ; preds = %bb7, %bb3
br i1 %i10, label %bb7, label %l.exit
l.exit: ; preds = %bb7
- call void @llvm.lifetime.end.p0(i64 6, ptr %.sroa.5.i)
+ call void @llvm.lifetime.end.p0(ptr %.sroa.5.i)
br label %bb11
bb11: ; preds = %l.exit, %bb
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
index a096e6d..73db71b 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace-1.ll
@@ -20,14 +20,14 @@ declare i32 @llvm.foo(i32, i32)
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I3_LOC:%.*]] = alloca i32, align 4, addrspace(5)
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I3_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I3_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I3_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I3_LOC]])
; CHECK-NEXT: [[I4:%.*]] = tail call i32 @llvm.foo(i32 [[I3_RELOAD]], i32 0)
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
index e870150..ed76444 100644
--- a/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
+++ b/llvm/test/Transforms/IROutliner/alloca-addrspace.ll
@@ -18,10 +18,10 @@ declare i32 @func(i32, i32)
; CHECK-LABEL: define {{[^@]+}}@outlineable() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I1_LOC:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 0, i32 1, ptr addrspace(5) [[I1_LOC]], i32 0)
; CHECK-NEXT: [[I1_RELOAD:%.*]] = load i32, ptr addrspace(5) [[I1_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 -1, ptr addrspace(5) [[I1_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[I1_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[I1_RELOAD]], i32 0, ptr addrspace(5) null, i32 -1)
; CHECK-NEXT: ret i32 0
;
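The two alloca-addrspace tests above also confirm that the update preserves the intrinsic's overloading on the pointer's address space: the mangling suffix (.p0, .p5) and the ptr addrspace(N) operand type are untouched, and only the leading i64 goes away. The complete new-form prototypes for the address spaces exercised here would read as follows (a sketch inferred from the calls in the tests; these declarations do not appear verbatim in this diff):

declare void @llvm.lifetime.start.p5(ptr addrspace(5) nocapture)
declare void @llvm.lifetime.end.p5(ptr addrspace(5) nocapture)
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)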
diff --git a/llvm/test/Transforms/IROutliner/different-intrinsics.ll b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
index 5fb22c3..f0e43bb 100644
--- a/llvm/test/Transforms/IROutliner/different-intrinsics.ll
+++ b/llvm/test/Transforms/IROutliner/different-intrinsics.ll
@@ -31,18 +31,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
@@ -51,18 +51,18 @@ entry:
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
index baf27ed..6730d1b 100644
--- a/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
+++ b/llvm/test/Transforms/IROutliner/different-order-phi-merges.ll
@@ -46,10 +46,10 @@ bb5:
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -61,10 +61,10 @@ bb5:
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
index 534efc3..53d52f5 100644
--- a/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
+++ b/llvm/test/Transforms/IROutliner/duplicate-merging-phis.ll
@@ -49,13 +49,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@@ -69,13 +69,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
index 3d3dbff..04ec9284f 100644
--- a/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
+++ b/llvm/test/Transforms/IROutliner/exit-block-phi-node-value-attribution.ll
@@ -42,19 +42,19 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: placeholder:
; CHECK-NEXT: [[A:%.*]] = sub i32 5, 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb3:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: placeholder1:
; CHECK-NEXT: [[B:%.*]] = add i32 5, 4
diff --git a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
index cd60f93..0e82217 100644
--- a/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
+++ b/llvm/test/Transforms/IROutliner/exit-phi-nodes-incoming-value-constant-argument.ll
@@ -72,10 +72,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/extraction.ll b/llvm/test/Transforms/IROutliner/extraction.ll
index 1eca4ea..77f904d 100644
--- a/llvm/test/Transforms/IROutliner/extraction.ll
+++ b/llvm/test/Transforms/IROutliner/extraction.ll
@@ -59,13 +59,13 @@ define void @extract_outs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -99,13 +99,13 @@ define void @extract_outs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
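Every extraction site in these IROutliner tests follows the same caller-side protocol: allocate an output slot, open its lifetime, call the outlined function, reload the produced value, then close the lifetime. A minimal sketch of that protocol (@outlined_sketch and %out.loc are illustrative names under the new size-less signature):

define i32 @reload_sketch() {
entry:
  %out.loc = alloca i32, align 4
  call void @llvm.lifetime.start.p0(ptr %out.loc)
  call void @outlined_sketch(ptr %out.loc)
  %out.reload = load i32, ptr %out.loc, align 4
  call void @llvm.lifetime.end.p0(ptr %out.loc)
  ret i32 %out.reload
}

declare void @outlined_sketch(ptr)
declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.lifetime.end.p0(ptr nocapture)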
diff --git a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
index 1184b4a..54f013c 100644
--- a/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-output-set-overload.ll
@@ -44,10 +44,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], ptr null, i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -61,13 +61,13 @@ next:
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[C_LOC]], ptr [[E_LOC]], i32 1)
; CHECK-NEXT: [[C_RELOAD:%.*]] = load i32, ptr [[C_LOC]], align 4
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[C_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[C_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
index 951466c..0c899dc 100644
--- a/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
+++ b/llvm/test/Transforms/IROutliner/gvn-phi-debug.ll
@@ -8,10 +8,10 @@ define i32 @r() {
; CHECK-LABEL: define i32 @r() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTLOC:%.*]] = alloca ptr, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load ptr, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_END8:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL7:%.*]] = call i32 [[DOTRELOAD]]()
@@ -91,10 +91,10 @@ define i32 @w() !dbg !8 {
; CHECK-SAME: ) !dbg [[DBG8:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RETVAL_1_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[RETVAL_1_CE_LOC]], i32 1), !dbg [[DBG11:![0-9]+]]
; CHECK-NEXT: [[RETVAL_1_CE_RELOAD:%.*]] = load i32, ptr [[RETVAL_1_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RETVAL_1_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RETVAL_1_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[CLEANUP10:%.*]], label [[ENTRY_AFTER_OUTLINE:%.*]]
; CHECK: entry_after_outline:
; CHECK-NEXT: [[CALL8:%.*]] = call i32 @llvm.bswap.i32(i32 0)
diff --git a/llvm/test/Transforms/IROutliner/illegal-assumes.ll b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
index d863fe7..c0c4e1a 100644
--- a/llvm/test/Transforms/IROutliner/illegal-assumes.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-assumes.ll
@@ -12,10 +12,10 @@ define void @outline_assumes() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_4(i1 true, ptr [[D]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: [[SPLIT_INST:%.*]] = sub i1 [[DL_RELOAD]], [[DL_RELOAD]]
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
@@ -48,10 +48,10 @@ define void @outline_assumes2() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_4(i1 false, ptr [[D]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[C]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_2(ptr [[A]], ptr [[B]], ptr [[C]])
@@ -82,10 +82,10 @@ define void @outline_assumes3() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i1 true, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]])
; CHECK-NEXT: ret void
@@ -115,10 +115,10 @@ define void @outline_assumes4() {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[D:%.*]] = alloca i1, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i1 false, ptr [[D]], ptr [[A]], ptr [[B]], ptr [[C]], ptr [[DL_LOC]])
; CHECK-NEXT: [[DL_RELOAD:%.*]] = load i1, ptr [[DL_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DL_LOC]])
; CHECK-NEXT: call void @llvm.assume(i1 [[DL_RELOAD]])
; CHECK-NEXT: call void @outlined_ir_func_3(ptr [[A]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
index 20e009a..523fd23 100644
--- a/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-memcpy.ll
@@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-memmove.ll b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
index 06480c8..7482405 100644
--- a/llvm/test/Transforms/IROutliner/illegal-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-memmove.ll
@@ -12,18 +12,18 @@ define i8 @function1(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
@@ -41,18 +41,18 @@ define i8 @function2(ptr noalias %s, ptr noalias %d, i64 %len) {
; CHECK-NEXT: [[B_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_LOC:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[S:%.*]], ptr [[D:%.*]], ptr [[A_LOC]], ptr [[B_LOC]])
; CHECK-NEXT: [[A_RELOAD:%.*]] = load i8, ptr [[A_LOC]], align 1
; CHECK-NEXT: [[B_RELOAD:%.*]] = load i8, ptr [[B_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_LOC]])
; CHECK-NEXT: call void @llvm.memmove.p0.p0.i64(ptr [[D]], ptr [[S]], i64 [[LEN:%.*]], i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i8 [[A_RELOAD]], i8 [[B_RELOAD]], ptr [[S]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
index 38dfd25..15f9aa2 100644
--- a/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
+++ b/llvm/test/Transforms/IROutliner/illegal-vaarg.ll
@@ -21,10 +21,10 @@ define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
@@ -56,10 +56,10 @@ define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[AP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
entry:
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
index 24ad86f..f9d4999 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits-not-in-first-outlined.ll
@@ -47,10 +47,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
index 6b50e99..7191c80 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-exits.ll
@@ -38,10 +38,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
index ab1836f..9085e7e 100644
--- a/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
+++ b/llvm/test/Transforms/IROutliner/mismatched-phi-outputs-ordering.ll
@@ -48,16 +48,16 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -72,13 +72,13 @@ next:
; CHECK-NEXT: [[D_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[E_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[E_LOC]], ptr [[D_LOC]], ptr null, i32 1)
; CHECK-NEXT: [[E_RELOAD:%.*]] = load i32, ptr [[E_LOC]], align 4
; CHECK-NEXT: [[D_RELOAD:%.*]] = load i32, ptr [[D_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
index 32973ea..3229f42 100644
--- a/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
+++ b/llvm/test/Transforms/IROutliner/must-capture-all-phi-nodes-begin.ll
@@ -57,10 +57,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
@@ -78,10 +78,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[TEST1]], label [[FIRST:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
index 4426009..fb2c5e9 100644
--- a/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
+++ b/llvm/test/Transforms/IROutliner/no-external-block-entries.ll
@@ -35,10 +35,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
index 77e3a82..9627274 100644
--- a/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/one-external-incoming-block-phi-node.ll
@@ -33,10 +33,10 @@ block_6:
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B_CE_LOC:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[B_CE_LOC]], i32 0)
; CHECK-NEXT: [[B_CE_RELOAD:%.*]] = load i32, ptr [[B_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_3:%.*]]
; CHECK: block_3:
; CHECK-NEXT: [[B:%.*]] = phi i32 [ [[B_CE_RELOAD]], [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/IROutliner/outline-memcpy.ll b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
index 0cf4f34..83fd5f6 100644
--- a/llvm/test/Transforms/IROutliner/outline-memcpy.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memcpy.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-memmove.ll b/llvm/test/Transforms/IROutliner/outline-memmove.ll
index cf79244..c512cd4 100644
--- a/llvm/test/Transforms/IROutliner/outline-memmove.ll
+++ b/llvm/test/Transforms/IROutliner/outline-memmove.ll
@@ -27,20 +27,20 @@ entry:
; CHECK-LABEL: @function1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
; CHECK-LABEL: @function2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RET_LOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RET_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[S:%.*]], ptr [[D:%.*]], i64 [[LEN:%.*]], ptr [[RET_LOC]])
; CHECK-NEXT: [[RET_RELOAD:%.*]] = load i8, ptr [[RET_LOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[RET_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RET_LOC]])
; CHECK-NEXT: ret i8 [[RET_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
index 2d52608..6a9cbca 100644
--- a/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
+++ b/llvm/test/Transforms/IROutliner/outline-vaarg-intrinsic.ll
@@ -53,10 +53,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
@@ -72,10 +72,10 @@ entry:
; CHECK-NEXT: store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT: [[TMP0:%.*]] = va_arg ptr [[AP]], i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT: [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP_LOC]])
; CHECK-NEXT: ret i32 [[TMP_RELOAD]]
;
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
index 31f1d12..a8153a4 100644
--- a/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-bitcasts.ll
@@ -8,8 +8,8 @@
; Additionally, we check that the newly added bitcast instruction is excluded in
; further extractions.
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @outline_bitcast_base() {
entry:
@@ -38,11 +38,11 @@ entry:
%al = load i32, ptr %a
%bl = load i32, ptr %b
%cl = load i32, ptr %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
%am = load i32, ptr %b
%bm = load i32, ptr %a
%cm = load i32, ptr %c
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -61,8 +61,8 @@ entry:
%am = add i32 %a, %b
%bm = add i32 %b, %a
%cm = add i32 %b, %c
- call void @llvm.lifetime.start.p0(i64 -1, ptr %d)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %d)
+ call void @llvm.lifetime.start.p0(ptr %d)
+ call void @llvm.lifetime.end.p0(ptr %d)
ret void
}
@@ -114,13 +114,13 @@ entry:
; CHECK-LABEL: @outlined_ir_func_1(
; CHECK-NEXT: newFuncRoot:
; CHECK-NEXT: [[D:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK: entry_to_outline:
; CHECK-NEXT: [[AL:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
; CHECK-NEXT: [[BL:%.*]] = add i32 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[CL:%.*]] = add i32 [[TMP1]], [[TMP2:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[D]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[D]])
; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK: entry_after_outline.exitStub:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
index 28c23e3..bb6bf8f 100644
--- a/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-branches-phi-nodes.ll
@@ -100,10 +100,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -127,10 +127,10 @@ block_6:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: [[DIFF_CE_RELOAD:%.*]] = load i32, ptr [[DIFF_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DIFF_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DIFF_CE_LOC]])
; CHECK-NEXT: br label [[BLOCK_6:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
index 81bf4f0..bb3163a 100644
--- a/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-cost-model.ll
@@ -104,13 +104,13 @@ define void @function3() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
@@ -159,13 +159,13 @@ define void @function4() #0 {
; NOCOST-NEXT: [[B:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; NOCOST-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_1(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; NOCOST-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; NOCOST-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; NOCOST-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; NOCOST-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; NOCOST-NEXT: call void @outlined_ir_func_2(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; NOCOST-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
index 2e1fae3..64e87fc 100644
--- a/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-different-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]], i32 0)
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -53,13 +53,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[SUB_LOC]], ptr [[DOTLOC]], i32 1)
; CHECK-NEXT: [[SUB_RELOAD:%.*]] = load i32, ptr [[SUB_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[SUB_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SUB_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[SUB_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
index cb5d505..d901955 100644
--- a/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-exits-to-phi-node.ll
@@ -43,10 +43,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -59,10 +59,10 @@ first:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br label [[FIRST:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
index 463e097..9dbfa9e 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-diff-outputs.ll
@@ -103,19 +103,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -143,19 +143,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 1)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
index 5293647..f789735 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits-one-output-set.ll
@@ -124,19 +124,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[ADD_LOC]], ptr [[MUL_LOC]], i32 0)
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[MUL_RELOAD:%.*]] = load i32, ptr [[MUL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[MUL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[MUL_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
index 663e6d8..1de13eb 100644
--- a/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-multiple-exits.ll
@@ -104,19 +104,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_6:%.*]], label [[BLOCK_7:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AVAL_RELOAD]], [[BVAL_RELOAD]]
@@ -144,19 +144,19 @@ block_7:
; CHECK-NEXT: [[B2:%.*]] = alloca i32, align 4
; CHECK-NEXT: br label [[BLOCK_2]]
; CHECK: block_2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[A2VAL_LOC]], ptr [[B2VAL_LOC]], ptr [[AVAL_LOC]], ptr [[BVAL_LOC]])
; CHECK-NEXT: [[A2VAL_RELOAD:%.*]] = load i32, ptr [[A2VAL_LOC]], align 4
; CHECK-NEXT: [[B2VAL_RELOAD:%.*]] = load i32, ptr [[B2VAL_LOC]], align 4
; CHECK-NEXT: [[AVAL_RELOAD:%.*]] = load i32, ptr [[AVAL_LOC]], align 4
; CHECK-NEXT: [[BVAL_RELOAD:%.*]] = load i32, ptr [[BVAL_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B2VAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[AVAL_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[BVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B2VAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[AVAL_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[BVAL_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BLOCK_7:%.*]], label [[BLOCK_6:%.*]]
; CHECK: block_6:
; CHECK-NEXT: [[DIFF:%.*]] = sub i32 [[A2VAL_RELOAD]], [[B2VAL_RELOAD]]
diff --git a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
index 6d0b153..77f17c3 100644
--- a/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-remapped-outputs.ll
@@ -17,24 +17,24 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT2:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 2, ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: br label [[NEXT:%.*]]
; CHECK: next:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[ADD_RELOAD]], ptr [[OUTPUT]], ptr [[RESULT]], ptr [[OUTPUT2]], ptr [[ADD2_LOC]], ptr [[DOTLOC2]])
; CHECK-NEXT: [[ADD2_RELOAD:%.*]] = load i32, ptr [[ADD2_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD3:%.*]] = load i32, ptr [[DOTLOC2]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD2_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD2_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC2]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD3]], i32 [[ADD2_RELOAD]], ptr [[RESULT2]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
index 380c53d..cc4f6ef 100644
--- a/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
+++ b/llvm/test/Transforms/IROutliner/outlining-same-output-blocks.ll
@@ -14,13 +14,13 @@ define void @outline_outputs1() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[OUTPUT]], align 4
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
@@ -52,13 +52,13 @@ define void @outline_outputs2() #0 {
; CHECK-NEXT: [[B:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[OUTPUT:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[RESULT:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[A]], ptr [[B]], ptr [[OUTPUT]], ptr [[ADD_LOC]], ptr [[DOTLOC]])
; CHECK-NEXT: [[ADD_RELOAD:%.*]] = load i32, ptr [[ADD_LOC]], align 4
; CHECK-NEXT: [[DOTRELOAD:%.*]] = load i32, ptr [[DOTLOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ADD_LOC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ADD_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTLOC]])
; CHECK-NEXT: call void @outlined_ir_func_1(i32 [[DOTRELOAD]], i32 [[ADD_RELOAD]], ptr [[RESULT]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
index 4bbe6e7..15d313a 100644
--- a/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
+++ b/llvm/test/Transforms/IROutliner/phi-node-exit-path-order.ll
@@ -58,10 +58,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = add i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 0)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = sub i32 [[TMP0]], [[TMP1]]
@@ -77,10 +77,10 @@ bb5:
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: bb2:
; CHECK-NEXT: [[A:%.*]] = sub i32 [[TMP0:%.*]], [[TMP1:%.*]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32 [[TMP0]], i32 [[TMP1]], ptr [[F_CE_LOC]], i32 1)
; CHECK-NEXT: [[F_CE_RELOAD:%.*]] = load i32, ptr [[F_CE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[F_CE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[F_CE_LOC]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb4:
; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
index 9631bfa..9e443ab 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-output-overload.ll
@@ -43,10 +43,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 0)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTCE_RELOAD]], [[ENTRY:%.*]] ]
@@ -59,10 +59,10 @@ next:
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DOTCE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], ptr [[DOTCE_LOC]], i32 1)
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TMP1]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: first:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
index 608abfa..02930d7 100644
--- a/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
+++ b/llvm/test/Transforms/IROutliner/phi-nodes-parent-block-referential.ll
@@ -53,10 +53,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[E_RELOAD]], [[TEST1]] ], [ [[Y]], [[ENTRY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
@@ -75,10 +75,10 @@ first:
; CHECK: test1:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[E_RELOAD:%.*]], [[TEST1]] ], [ [[Y]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[Y]], [[ENTRY]] ], [ [[E_RELOAD]], [[TEST1]] ]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[E_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[E_LOC]])
; CHECK-NEXT: [[E_RELOAD]] = load i32, ptr [[E_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[E_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[E_LOC]])
; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[C]], [[C]]
; CHECK-NEXT: br i1 true, label [[FIRST:%.*]], label [[TEST1]]
; CHECK: first:
diff --git a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
index f46035a..25b1e8e 100644
--- a/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
+++ b/llvm/test/Transforms/IROutliner/region-inputs-in-phi-nodes.ll
@@ -53,10 +53,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
@@ -73,10 +73,10 @@ next:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[Z:%.*]] = mul i32 [[C]], [[C]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[TMP0]], i32 [[C]], ptr [[DOTCE_LOC]])
; CHECK-NEXT: [[DOTCE_RELOAD:%.*]] = load i32, ptr [[DOTCE_LOC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[DOTCE_LOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DOTCE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[FIRST:%.*]], label [[NEXT:%.*]]
; CHECK: dummy:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll b/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
index 66c7222..697c816 100644
--- a/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
+++ b/llvm/test/Transforms/IndVarSimplify/exit_value_test2.ll
@@ -6,14 +6,14 @@
; udiv will be introduced by expand and the cost will be high.
declare void @_Z3mixRjj(ptr dereferenceable(4), i32)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) {
; CHECK-LABEL: @_Z3fooPKcjj(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 -1640531527, ptr [[A]], align 4
; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[LEN:%.*]], 11
; CHECK-NEXT: br i1 [[CMP8]], label [[WHILE_BODY_LR_PH:%.*]], label [[WHILE_END:%.*]]
@@ -40,12 +40,12 @@ define i32 @_Z3fooPKcjj(ptr nocapture readonly %s, i32 %len, i32 %c) {
; CHECK-NEXT: [[KEYLEN_0_LCSSA:%.*]] = phi i32 [ [[SUB_LCSSA]], [[WHILE_COND_WHILE_END_CRIT_EDGE]] ], [ [[LEN]], [[ENTRY:%.*]] ]
; CHECK-NEXT: call void @_Z3mixRjj(ptr dereferenceable(4) [[A]], i32 [[KEYLEN_0_LCSSA]])
; CHECK-NEXT: [[T4:%.*]] = load i32, ptr [[A]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 [[T4]]
;
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 -1640531527, ptr %a, align 4
%cmp8 = icmp ugt i32 %len, 11
br i1 %cmp8, label %while.body.lr.ph, label %while.end
@@ -76,7 +76,7 @@ while.end: ; preds = %while.cond.while.en
%keylen.0.lcssa = phi i32 [ %sub.lcssa, %while.cond.while.end_crit_edge ], [ %len, %entry ]
call void @_Z3mixRjj(ptr dereferenceable(4) %a, i32 %keylen.0.lcssa)
%t4 = load i32, ptr %a, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret i32 %t4
}
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
index 053d073..4c04e6d 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/lifetime.ll
@@ -4,17 +4,17 @@
define i32 @lifetime_flat_pointer() {
; CHECK-LABEL: define i32 @lifetime_flat_pointer() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p5(i64 4, ptr addrspace(5) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p5(ptr addrspace(5) [[ALLOCA]])
; CHECK-NEXT: store i32 1, ptr addrspace(5) [[ALLOCA]], align 4
; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p5(i64 4, ptr addrspace(5) [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p5(ptr addrspace(5) [[ALLOCA]])
; CHECK-NEXT: ret i32 [[RET]]
;
%alloca = alloca i32, align 4, addrspace(5)
%flat = addrspacecast ptr addrspace(5) %alloca to ptr
- call void @llvm.lifetime.start(i64 4, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(5) %alloca)
store i32 1, ptr %flat, align 4
%ret = load i32, ptr %flat, align 4
- call void @llvm.lifetime.end(i64 4, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.end(ptr addrspace(5) %alloca)
ret i32 %ret
}
diff --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
index 31e914a..1a21416 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/lifetime.ll
@@ -7,20 +7,20 @@ define i32 @lifetime_flat_pointer() {
; CHECK-LABEL: define i32 @lifetime_flat_pointer() {
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[ALLOCA]] to ptr addrspace(5)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: store i32 1, ptr addrspace(5) [[TMP1]], align 4
; CHECK-NEXT: [[RET:%.*]] = load i32, ptr addrspace(5) [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret i32 [[RET]]
;
%alloca = alloca i32, align 4
%1 = addrspacecast ptr %alloca to ptr addrspace(5)
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store i32 1, ptr addrspace(5) %1, align 4
%ret = load i32, ptr addrspace(5) %1, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret i32 %ret
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
index c1375cb..54daf7c 100644
--- a/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sve-alloca-merge.ll
@@ -14,8 +14,8 @@ entry:
define i64 @foo() {
; CHECK-LABEL: @foo(
-; CHECK: call void @llvm.lifetime.start.p0(i64 -1, ptr %{{.*}})
-; CHECK: call void @llvm.lifetime.end.p0(i64 -1, ptr %{{.*}})
+; CHECK: call void @llvm.lifetime.start.p0(ptr %{{.*}})
+; CHECK: call void @llvm.lifetime.end.p0(ptr %{{.*}})
entry:
%a = alloca <vscale x 2 x i64>, align 16
store <vscale x 2 x i64> zeroinitializer, ptr %a, align 16
diff --git a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
index bbce9e4..c3f6dd7 100644
--- a/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
+++ b/llvm/test/Transforms/Inline/ML/state-tracking-coro.ll
@@ -31,10 +31,10 @@ entry:
await.ready:
%StrayCoroSave = call token @llvm.coro.save(ptr null)
%val = load i32, ptr %ref.tmp7
- call void @llvm.lifetime.start.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.start.p0(ptr %testval)
%test = load i32, ptr %testval
call void @print(i32 %test)
- call void @llvm.lifetime.end.p0(i64 4, ptr %testval)
+ call void @llvm.lifetime.end.p0(ptr %testval)
call void @print(i32 %val)
br label %exit
exit:
@@ -54,5 +54,5 @@ declare i8 @llvm.coro.suspend(token, i1) #3
declare void @"\01??3@YAXPEAX@Z"(ptr) local_unnamed_addr #10
declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
declare i1 @llvm.coro.end(ptr, i1) #3
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
diff --git a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
index b5c4f42..71b463b 100644
--- a/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
+++ b/llvm/test/Transforms/Inline/SystemZ/inline-target-attr.ll
@@ -12,28 +12,28 @@ entry:
declare i32 @baz(...) #0
-define i32 @bar() #1 {
+define i32 @features_subset() #1 {
entry:
%call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: bar
-; CHECK: call i32 @foo()
+; CHECK-LABEL: features_subset
+; CHECK: call i32 (...) @baz()
}
-define i32 @qux() #0 {
+define i32 @features_equal() #0 {
entry:
%call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: qux
+; CHECK-LABEL: features_equal
; CHECK: call i32 (...) @baz()
}
-define i32 @quux() #2 {
+define i32 @features_different() #2 {
entry:
- %call = call i32 @bar()
+ %call = call i32 @foo()
ret i32 %call
-; CHECK-LABEL: quux
-; CHECK: call i32 @bar()
+; CHECK-LABEL: features_different
+; CHECK: call i32 @foo()
}
diff --git a/llvm/test/Transforms/Inline/access-attributes-prop.ll b/llvm/test/Transforms/Inline/access-attributes-prop.ll
index 28fa44e..fed2590 100644
--- a/llvm/test/Transforms/Inline/access-attributes-prop.ll
+++ b/llvm/test/Transforms/Inline/access-attributes-prop.ll
@@ -410,9 +410,9 @@ define void @prop_fn_decl_fail_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_fail_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -425,9 +425,9 @@ define void @prop_cb_def_wr_fail_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_fail_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -440,10 +440,10 @@ define void @prop_fn_decl_partially_okay_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_fn_decl_partially_okay_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
@@ -456,10 +456,10 @@ define void @prop_cb_def_wr_partially_okay_alloca(ptr %p) {
; CHECK-LABEL: define {{[^@]+}}@prop_cb_def_wr_partially_okay_alloca
; CHECK-SAME: (ptr [[P:%.*]]) {
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: call void @bar2(ptr [[P]], ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: call void @bar1(ptr [[P]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Inline/byval-align.ll b/llvm/test/Transforms/Inline/byval-align.ll
index 0b135aa..a23f364 100644
--- a/llvm/test/Transforms/Inline/byval-align.ll
+++ b/llvm/test/Transforms/Inline/byval-align.ll
@@ -28,13 +28,13 @@ define void @byval_caller(ptr nocapture align 64 %a, ptr %b) #0 {
; CHECK-SAME: (ptr align 64 captures(none) [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A1:%.*]] = alloca float, align 128
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 128 [[A1]], ptr align 128 [[A]], i64 4, i1 false)
; CHECK-NEXT: [[LOAD_I:%.*]] = load float, ptr [[A1]], align 4
; CHECK-NEXT: [[B_IDX_I:%.*]] = getelementptr inbounds float, ptr [[B]], i64 8
; CHECK-NEXT: [[ADD_I:%.*]] = fadd float [[LOAD_I]], 2.000000e+00
; CHECK-NEXT: store float [[ADD_I]], ptr [[B_IDX_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A1]])
; CHECK-NEXT: [[CALLER_LOAD:%.*]] = load float, ptr [[B]], align 4
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 7
; CHECK-NEXT: store float [[CALLER_LOAD]], ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/Inline/byval-tail-call.ll b/llvm/test/Transforms/Inline/byval-tail-call.ll
index 808104c..f8fd4a6 100644
--- a/llvm/test/Transforms/Inline/byval-tail-call.ll
+++ b/llvm/test/Transforms/Inline/byval-tail-call.ll
@@ -22,11 +22,11 @@ define void @bar(ptr byval(i32) %x) {
define void @foo(ptr %x) {
; CHECK-LABEL: @foo(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: call void @ext(ptr nonnull [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
call void @bar(ptr byval(i32) %x)
@@ -42,12 +42,12 @@ define internal void @qux(ptr byval(i32) %x) {
define void @frob(ptr %x) {
; CHECK-LABEL: @frob(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: call void @ext(ptr nonnull [[X1]])
; CHECK-NEXT: tail call void @ext(ptr null)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
tail call void @qux(ptr byval(i32) %x)
@@ -71,11 +71,11 @@ define void @bar2(ptr byval(i32) %x) {
define void @foobar(ptr %x) {
; CHECK-LABEL: @foobar(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[X:%.*]], align 1
; CHECK-NEXT: store i32 [[TMP2]], ptr [[X1]], align 4
; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
tail call void @bar2(ptr byval(i32) %x)
@@ -85,9 +85,9 @@ define void @foobar(ptr %x) {
define void @barfoo() {
; CHECK-LABEL: @barfoo(
; CHECK-NEXT: [[X1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X1]])
; CHECK-NEXT: tail call void @ext2(ptr nonnull byval(i32) [[X1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X1]])
; CHECK-NEXT: ret void
;
%x = alloca i32
diff --git a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
index 1d1cb45..e79ac66 100644
--- a/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
+++ b/llvm/test/Transforms/Inline/byval-with-non-alloca-addrspace.ll
@@ -26,11 +26,11 @@ define i64 @foo(ptr %arg) {
; CHECK-LABEL: define i64 @foo(
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[ARG1:%.*]] = alloca [[STRUCT:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[ARG1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARG1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ARG1]], ptr align 8 [[ARG]], i64 16, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT]], ptr [[ARG1]], i64 0, i32 1
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[ARG1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARG1]])
; CHECK-NEXT: ret i64 0
;
%1 = call i64 @bar(ptr byval(%struct) align 8 %arg)
diff --git a/llvm/test/Transforms/Inline/byval.ll b/llvm/test/Transforms/Inline/byval.ll
index b4a19c5..c945d7f 100644
--- a/llvm/test/Transforms/Inline/byval.ll
+++ b/llvm/test/Transforms/Inline/byval.ll
@@ -35,12 +35,12 @@ define i32 @test1() nounwind {
; CHECK-NEXT: store i32 1, ptr [[TMP1]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
; CHECK-NEXT: store i64 2, ptr [[TMP4]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[S1]], ptr [[S]], i64 12, i1 false)
; CHECK-NEXT: [[TMP1_I:%.*]] = load i32, ptr [[S1]], align 4
; CHECK-NEXT: [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
; CHECK-NEXT: store i32 [[TMP2_I]], ptr [[S1]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]])
; CHECK-NEXT: ret i32 0
;
entry:
@@ -104,10 +104,10 @@ define void @test3() nounwind {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[S1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 64 [[S1]], ptr align 64 [[S]], i64 12, i1 false)
; CHECK-NEXT: call void @g3(ptr align 64 [[S1]]) #[[ATTR0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[S1]])
; CHECK-NEXT: ret void
;
entry:
@@ -157,12 +157,12 @@ define i32 @test5() {
; CHECK-LABEL: define i32 @test5() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_S0:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 4 @b, i64 4, i1 false)
; CHECK-NEXT: store i32 0, ptr @b, align 4
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT: store i32 [[TMP0]], ptr @a, align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT: ret i32 [[TMP1]]
;
diff --git a/llvm/test/Transforms/Inline/callbr.ll b/llvm/test/Transforms/Inline/callbr.ll
index 1607700..57e92bb 100644
--- a/llvm/test/Transforms/Inline/callbr.ll
+++ b/llvm/test/Transforms/Inline/callbr.ll
@@ -10,8 +10,8 @@ define dso_local i32 @main() {
; CHECK-NEXT: [[I1_I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store i32 0, ptr [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I1_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I1_I]])
; CHECK-NEXT: store i32 0, ptr [[I1_I]], align 4
; CHECK-NEXT: [[I2_I:%.*]] = load i32, ptr [[I1_I]], align 4
; CHECK-NEXT: callbr void asm sideeffect "", "r,!i,!i,~{dirflag},~{fpsr},~{flags}"(i32 [[I2_I]])
@@ -27,8 +27,8 @@ define dso_local i32 @main() {
; CHECK-NEXT: br label [[T32_EXIT]]
; CHECK: t32.exit:
; CHECK-NEXT: [[I7_I:%.*]] = load i32, ptr [[I_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I1_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I1_I]])
; CHECK-NEXT: ret i32 [[I7_I]]
;
bb:
diff --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll
index d29360f..f96b5a9 100644
--- a/llvm/test/Transforms/Inline/devirtualize-4.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-4.ll
@@ -48,14 +48,14 @@
define dso_local void @_Z4Testv() local_unnamed_addr {
entry:
%o = alloca %class.Impl, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %o)
+ call void @llvm.lifetime.start.p0(ptr nonnull %o)
call void @_ZN4ImplC2Ev(ptr nonnull %o)
call fastcc void @_ZL11IndirectRunR9Interface(ptr nonnull dereferenceable(8) %o)
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %o)
+ call void @llvm.lifetime.end.p0(ptr nonnull %o)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local void @_ZN4ImplC2Ev(ptr %this) unnamed_addr align 2 {
entry:
@@ -74,7 +74,7 @@ entry:
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(ptr %this) unnamed_addr align 2 {
entry:
@@ -85,10 +85,10 @@ entry:
define linkonce_odr dso_local void @_ZN4Impl3RunEv(ptr %this) unnamed_addr align 2 {
entry:
%ref.tmp = alloca ptr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
store ptr %this, ptr %ref.tmp, align 8
call void @_Z13DoNotOptimizeIP4ImplEvRKT_(ptr nonnull dereferenceable(8) %ref.tmp)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret void
}
@@ -160,10 +160,10 @@ memptr.end: ; preds = %memptr.nonvirtual,
define i32 @_Z2g1v() {
entry:
%a = alloca %struct.A, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @_ZN1AC1Ev(ptr nonnull %a)
%call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 1, i64 0)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret i32 %call
}
@@ -176,10 +176,10 @@ entry:
define i32 @_Z2g2v() {
entry:
%a = alloca %struct.A, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
call void @_ZN1AC1Ev(ptr nonnull %a)
%call = call i32 @_Z1fP1AMS_FivE(ptr nonnull %a, i64 9, i64 0)
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret i32 %call
}
diff --git a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
index 9b293d39..aad192a 100644
--- a/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
+++ b/llvm/test/Transforms/Inline/dynamic-alloca-simplified-large.ll
@@ -54,9 +54,9 @@ define void @caller2_below_threshold(ptr %p1, i1 %b) {
; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[SPLIT:%.*]]
; CHECK: split:
; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0()
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[VLA_I]])
; CHECK-NEXT: call void @extern_call(ptr nonnull [[VLA_I]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 60000, ptr [[VLA_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[VLA_I]])
; CHECK-NEXT: call void @llvm.stackrestore.p0(ptr [[SAVEDSTACK]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
diff --git a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
index c74351b..02f5774 100644
--- a/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
+++ b/llvm/test/Transforms/Inline/inline-deferred-instsimplify.ll
@@ -52,7 +52,7 @@ return: ; preds = %check_pointers_are_
define i32 @main() {
; CHECK-LABEL: define i32 @main() {
; CHECK-NEXT: [[G_VAR:%.*]] = alloca [[STRUCT_A:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr [[G_VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[G_VAR]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[G_VAR]], ptr align 8 @g_var, i64 20, i1 false)
; CHECK-NEXT: [[VAL_I:%.*]] = load i32, ptr [[G_VAR]], align 8
; CHECK-NEXT: [[DOTNOT_I:%.*]] = icmp eq i32 [[VAL_I]], 0
@@ -68,7 +68,7 @@ define i32 @main() {
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: callee.exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr [[G_VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[G_VAR]])
; CHECK-NEXT: ret i32 0
;
call void @callee(ptr noundef byval(%struct.a) align 8 @g_var)
diff --git a/llvm/test/Transforms/Inline/inline-tail.ll b/llvm/test/Transforms/Inline/inline-tail.ll
index 0bfd056..b2bf3bb 100644
--- a/llvm/test/Transforms/Inline/inline-tail.ll
+++ b/llvm/test/Transforms/Inline/inline-tail.ll
@@ -64,7 +64,7 @@ define void @test_byval_a(ptr byval(i32) %p) {
; CHECK-LABEL: define void @test_byval_a
; CHECK-SAME: (ptr byval(i32) [[P:%.*]]) {
; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false)
; CHECK-NEXT: musttail call void @test_byval_c(ptr byval(i32) [[P1]])
; CHECK-NEXT: ret void
@@ -87,7 +87,7 @@ define void @test_dynalloca_a(ptr byval(i32) %p, i32 %n) {
; CHECK-SAME: (ptr byval(i32) [[P:%.*]], i32 [[N:%.*]]) {
; CHECK-NEXT: [[P1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[SAVEDSTACK:%.*]] = call ptr @llvm.stacksave.p0()
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[P1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P1]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[P1]], ptr [[P]], i64 4, i1 false)
; CHECK-NEXT: [[BUF_I:%.*]] = alloca i8, i32 [[N]], align 1
; CHECK-NEXT: call void @escape(ptr [[BUF_I]])
diff --git a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
index 4e13ff4..4ac4675 100644
--- a/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
+++ b/llvm/test/Transforms/Inline/inlined-mustprogress-loop-metadata.ll
@@ -312,9 +312,9 @@ define void @caller_multiple(i32 %a, i32 %b) #1 {
; CHECK-NEXT: store i32 [[INC]], ptr [[I]], align 4
; CHECK-NEXT: br label %[[FOR_COND1]]
; CHECK: [[FOR_END4]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4
; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4
; CHECK-NEXT: br label %[[FOR_COND_I:.*]]
@@ -526,9 +526,9 @@ define void @caller_nested(i32 %a, i32 %b) #1 {
; CHECK-NEXT: store i32 [[INC14]], ptr [[I9]], align 4
; CHECK-NEXT: br label %[[FOR_COND10]]
; CHECK: [[FOR_END15]]:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B_ADDR_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B_ADDR_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I_I]])
; CHECK-NEXT: store i32 0, ptr [[A_ADDR_I]], align 4
; CHECK-NEXT: store i32 5, ptr [[B_ADDR_I]], align 4
; CHECK-NEXT: br label %[[FOR_COND_I:.*]]
diff --git a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
index 7438ef3..074550b 100644
--- a/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
+++ b/llvm/test/Transforms/Inline/lifetime-no-datalayout.ll
@@ -18,9 +18,9 @@ define void @helper() {
define void @test() {
; CHECK-LABEL: define void @test() {
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: ret void
;
call void @helper()
diff --git a/llvm/test/Transforms/Inline/lifetime.ll b/llvm/test/Transforms/Inline/lifetime.ll
index 3ef5019..06b911d 100644
--- a/llvm/test/Transforms/Inline/lifetime.ll
+++ b/llvm/test/Transforms/Inline/lifetime.ll
@@ -2,21 +2,21 @@
; RUN: opt -passes=inline -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define void @helper_both_markers() {
; CHECK-LABEL: define void @helper_both_markers() {
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i8
; These markers previously carried an explicit size (2) that differed from
; the allocation size; with the size argument removed, they cover the whole alloca.
- call void @llvm.lifetime.start.p0(i64 2, ptr %a)
- call void @llvm.lifetime.end.p0(i64 2, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -24,10 +24,10 @@ define void @test_both_markers() {
; CHECK-LABEL: define void @test_both_markers() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_both_markers()
@@ -54,12 +54,12 @@ define void @test_no_marker() {
; CHECK-LABEL: define void @test_no_marker() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[A_I:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
; CHECK-NEXT: call void @use(ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_no_markers()
@@ -70,13 +70,13 @@ define void @test_no_marker() {
define void @helper_two_casts() {
; CHECK-LABEL: define void @helper_two_casts() {
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -84,10 +84,10 @@ define void @test_two_casts() {
; CHECK-LABEL: define void @test_two_casts() {
; CHECK-NEXT: [[A_I1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A_I1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I1]])
; CHECK-NEXT: ret void
;
call void @helper_two_casts()
@@ -109,9 +109,9 @@ define void @helper_arrays_alloca() {
define void @test_arrays_alloca() {
; CHECK-LABEL: define void @test_arrays_alloca() {
; CHECK-NEXT: [[A_I:%.*]] = alloca [10 x i32], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 40, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_I]])
; CHECK-NEXT: call void @use(ptr [[A_I]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 40, ptr [[A_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A_I]])
; CHECK-NEXT: ret void
;
call void @helper_arrays_alloca()
diff --git a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
index 531801d..2bded9c 100644
--- a/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
+++ b/llvm/test/Transforms/Inline/no-inline-incompatible-gc.ll
@@ -16,12 +16,12 @@
define i32 @caller_no_gc() {
; CHECK-LABEL: define i32 @caller_no_gc() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: ret i32 [[LENGTH_I]]
;
%x = call i32 @callee_with_gc()
@@ -32,12 +32,12 @@ define i32 @caller_no_gc() {
define i32 @caller_same_gc() gc "example" {
; CHECK-LABEL: define i32 @caller_same_gc() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: ret i32 [[LENGTH_I]]
;
%x = call i32 @callee_with_gc()
@@ -97,12 +97,12 @@ define i32 @callee_with_other_gc() gc "other-example" {
define i32 @caller_inline_first_caller() {
; CHECK-LABEL: define i32 @caller_inline_first_caller() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc()
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]]
; CHECK-NEXT: ret i32 [[ADD]]
@@ -118,12 +118,12 @@ define i32 @caller_inline_first_caller() {
define i32 @caller_inline_second_caller() gc "example" {
; CHECK-LABEL: define i32 @caller_inline_second_caller() gc "example" {
; CHECK-NEXT: [[ROOT_I:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ROOT_I]])
; CHECK-NEXT: call void @llvm.gcroot(ptr [[ROOT_I]], ptr null)
; CHECK-NEXT: [[OBJ_I:%.*]] = call ptr @h()
; CHECK-NEXT: store ptr [[OBJ_I]], ptr [[ROOT_I]], align 8
; CHECK-NEXT: [[LENGTH_I:%.*]] = load i32, ptr [[OBJ_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[ROOT_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ROOT_I]])
; CHECK-NEXT: [[Y:%.*]] = call i32 @callee_with_other_gc()
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LENGTH_I]], [[Y]]
; CHECK-NEXT: ret i32 [[ADD]]
diff --git a/llvm/test/Transforms/Inline/noalias-calls-always.ll b/llvm/test/Transforms/Inline/noalias-calls-always.ll
index a80cd12..18a65b9 100644
--- a/llvm/test/Transforms/Inline/noalias-calls-always.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls-always.ll
@@ -33,13 +33,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b)
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META3]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META0]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META5]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -74,13 +74,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A:%.*]], ptr align 16 [[B:%.*]], i64 16, i1 false), !noalias [[META9]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C:%.*]], i64 16, i1 false), !noalias [[META6]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META11]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/Inline/noalias-calls.ll b/llvm/test/Transforms/Inline/noalias-calls.ll
index fdbad60..4673dae 100644
--- a/llvm/test/Transforms/Inline/noalias-calls.ll
+++ b/llvm/test/Transforms/Inline/noalias-calls.ll
@@ -36,13 +36,13 @@ define void @foo(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %b)
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META3]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META5:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META5]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -79,13 +79,13 @@ define void @foo_cs(ptr nocapture %a, ptr nocapture readonly %c, ptr nocapture %
; CHECK-NEXT: [[L_I:%.*]] = alloca i8, i32 512, align 1
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[L_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr align 16 [[B]], i64 16, i1 false), !noalias [[META9]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[B]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[A]], ptr readonly align 16 [[C]], i64 16, i1 false), !alias.scope [[META11:![0-9]+]]
; CHECK-NEXT: call void @hey(), !noalias [[META11]]
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[L_I]], ptr readonly align 16 [[C]], i64 16, i1 false), !noalias [[META6]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 512, ptr [[L_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[L_I]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/assume_inevitable.ll b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
index 2643c9b..5f27ff1 100644
--- a/llvm/test/Transforms/InstCombine/assume_inevitable.ll
+++ b/llvm/test/Transforms/InstCombine/assume_inevitable.ll
@@ -35,10 +35,10 @@ entry:
%dummy_eq = icmp ugt i32 %loadres, 42
tail call void @llvm.assume(i1 %dummy_eq)
- call void @llvm.lifetime.start.p0(i64 1, ptr %dummy)
+ call void @llvm.lifetime.start.p0(ptr %dummy)
%i = call ptr @llvm.invariant.start.p0(i64 1, ptr %dummy)
call void @llvm.invariant.end.p0(ptr %i, i64 1, ptr %dummy)
- call void @llvm.lifetime.end.p0(i64 1, ptr %dummy)
+ call void @llvm.lifetime.end.p0(ptr %dummy)
%m_a = call ptr @llvm.ptr.annotation.p0(ptr %m, ptr @.str, ptr @.str1, i32 2, ptr null)
%objsz = call i64 @llvm.objectsize.i64.p0(ptr %c, i1 false)
@@ -61,8 +61,8 @@ declare i64 @llvm.objectsize.i64.p0(ptr, i1)
declare i32 @llvm.annotation.i32(i32, ptr, ptr, i32)
declare ptr @llvm.ptr.annotation.p0(ptr, ptr, ptr, i32, ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare ptr @llvm.invariant.start.p0(i64, ptr nocapture)
declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture)
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
index fe8b321..93c4ae6 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-custom-dl.ll
@@ -15,16 +15,16 @@ entry:
define i32 @objsize2_custom_idx() #0 {
entry:
%var = alloca %struct.V, align 4
- call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.start.p0(ptr %var) #3
%arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
%0 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false, i1 false, i1 false)
%conv = trunc i64 %0 to i32
- call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.end.p0(ptr %var) #3
ret i32 %conv
; CHECK: ret i32 27
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare ptr @malloc(i64)
declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1)
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
index b8919a7..051466f 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-offset.ll
@@ -25,24 +25,24 @@ define i32 @foo1(i32 %N) {
entry:
%Big = alloca [20 x i8], align 16
%Small = alloca [10 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 20, ptr %Big)
- call void @llvm.lifetime.start.p0(i64 10, ptr %Small)
+ call void @llvm.lifetime.start.p0(ptr %Big)
+ call void @llvm.lifetime.start.p0(ptr %Small)
%tobool = icmp ne i32 %N, 0
%add.ptr = getelementptr inbounds [20 x i8], ptr %Big, i64 0, i64 10
%cond = select i1 %tobool, ptr %add.ptr, ptr %Small
%0 = call i64 @llvm.objectsize.i64.p0(ptr %cond, i1 false)
%conv = trunc i64 %0 to i32
- call void @llvm.lifetime.end.p0(i64 10, ptr %Small)
- call void @llvm.lifetime.end.p0(i64 20, ptr %Big)
+ call void @llvm.lifetime.end.p0(ptr %Small)
+ call void @llvm.lifetime.end.p0(ptr %Big)
ret i32 %conv
; CHECK: ret i32 10
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i64 @llvm.objectsize.i64.p0(ptr, i1)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @foo() {
entry:
diff --git a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
index 533451f..3a7b760 100644
--- a/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-object-size-ptr.ll
@@ -18,11 +18,11 @@ define i32 @foo() #0 {
; CHECK-NEXT: ret i32 27
;
%var = alloca %struct.V, align 4
- call void @llvm.lifetime.start.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.start.p0(ptr %var) #3
%arrayidx = getelementptr inbounds [10 x i8], ptr %var, i64 0, i64 1
%t1 = call i64 @llvm.objectsize.i64.p0(ptr %arrayidx, i1 false)
%conv = trunc i64 %t1 to i32
- call void @llvm.lifetime.end.p0(i64 28, ptr %var) #3
+ call void @llvm.lifetime.end.p0(ptr %var) #3
ret i32 %conv
}
@@ -63,9 +63,9 @@ define ptr @minimal_invariant_start_use(i8 %x) {
ret ptr %i
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i64 @llvm.objectsize.i64.p0(ptr, i1) #2
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #0
declare ptr @llvm.invariant.start.p0(i64 immarg, ptr nocapture) #0
declare void @llvm.invariant.end.p0(ptr, i64 immarg, ptr nocapture) #0
diff --git a/llvm/test/Transforms/InstCombine/compare-alloca.ll b/llvm/test/Transforms/InstCombine/compare-alloca.ll
index a27cd70..55d92b7 100644
--- a/llvm/test/Transforms/InstCombine/compare-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/compare-alloca.ll
@@ -86,18 +86,18 @@ define i1 @alloca_argument_compare_escaped_through_store(ptr %arg, ptr %ptr) {
ret i1 %cmp
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i1 @alloca_argument_compare_benign_instrs(ptr %arg) {
; CHECK-LABEL: @alloca_argument_compare_benign_instrs(
; CHECK-NEXT: ret i1 false
;
%alloc = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloc)
+ call void @llvm.lifetime.start.p0(ptr %alloc)
%cmp = icmp eq ptr %arg, %alloc
%x = load i8, ptr %arg
store i8 %x, ptr %alloc
- call void @llvm.lifetime.end.p0(i64 1, ptr %alloc)
+ call void @llvm.lifetime.end.p0(ptr %alloc)
ret i1 %cmp
}
diff --git a/llvm/test/Transforms/InstCombine/deadcode.ll b/llvm/test/Transforms/InstCombine/deadcode.ll
index f3e1ba6..4dcdbb9 100644
--- a/llvm/test/Transforms/InstCombine/deadcode.ll
+++ b/llvm/test/Transforms/InstCombine/deadcode.ll
@@ -22,13 +22,13 @@ define ptr @test2(i32 %width) {
declare ptr @llvm.stacksave()
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
define void @test3() {
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
index 422e179..597bd2c 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-no-null-opt.ll
@@ -2,8 +2,8 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture, ptr nocapture)
define void @bar(i1 %flag) #0 !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) #0 !dbg !4 {
; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
; CHECK-NEXT: br label [[FIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
; CHECK-NEXT: br label [[FIN]]
; CHECK: fin:
; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
br label %bb3
bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
fin:
diff --git a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
index e379b32..fd45fe2 100644
--- a/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture)
define void @asan() sanitize_address {
@@ -9,8 +9,8 @@ entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -24,8 +24,8 @@ entry:
; CHECK-LABEL: @hwasan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -39,8 +39,8 @@ entry:
; CHECK-LABEL: @msan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -54,8 +54,8 @@ entry:
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK-NOT: call void @llvm.lifetime
call void @foo(ptr %text) ; Keep alloca alive
diff --git a/llvm/test/Transforms/InstCombine/lifetime.ll b/llvm/test/Transforms/InstCombine/lifetime.ll
index b94c9694..6313dba 100644
--- a/llvm/test/Transforms/InstCombine/lifetime.ll
+++ b/llvm/test/Transforms/InstCombine/lifetime.ll
@@ -2,8 +2,8 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture, ptr nocapture)
define void @bar(i1 %flag) !dbg !4 {
@@ -20,11 +20,11 @@ define void @bar(i1 %flag) !dbg !4 {
; CHECK-NEXT: #dbg_declare(ptr [[TEXT]], [[META16:![0-9]+]], !DIExpression(), [[META24:![0-9]+]])
; CHECK-NEXT: br label [[FIN:%.*]]
; CHECK: else:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[BUFF]])
; CHECK-NEXT: call void @foo(ptr nonnull [[BUFF]], ptr nonnull [[TEXT]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[BUFF]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[TEXT]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[BUFF]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEXT]])
; CHECK-NEXT: br label [[FIN]]
; CHECK: fin:
; CHECK-NEXT: ret void
@@ -35,31 +35,31 @@ entry:
br i1 %flag, label %if, label %else
if:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
br label %bb3
bb3:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
call void @llvm.dbg.declare(metadata ptr %text, metadata !14, metadata !25), !dbg !26
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
else:
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.start.p0(i64 1, ptr %buff)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %buff)
call void @foo(ptr %buff, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %buff)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %buff)
+ call void @llvm.lifetime.end.p0(ptr %text)
br label %fin
fin:
diff --git a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
index 0072153..7aa5eed 100644
--- a/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
+++ b/llvm/test/Transforms/InstCombine/lower-dbg-declare.ll
@@ -26,7 +26,7 @@ entry:
%retval = alloca i32, align 4
%d1 = alloca i32, align 4
store i32 0, ptr %retval, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %d1) #4, !dbg !17
+ call void @llvm.lifetime.start.p0(ptr %d1) #4, !dbg !17
; CHECK: #dbg_value(i32 42, [[METADATA_IDX1:![0-9]+]], !DIExpression(),
; CHECK-NEXT: store
call void @llvm.dbg.declare(metadata ptr %d1, metadata !16, metadata !DIExpression()), !dbg !17
@@ -48,11 +48,11 @@ while.body: ; preds = %while.cond
br label %while.cond, !dbg !22, !llvm.loop !24
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %d1) #4, !dbg !25
+ call void @llvm.lifetime.end.p0(ptr %d1) #4, !dbg !25
ret i32 0, !dbg !26
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.dbg.declare(metadata, metadata, metadata)
@@ -64,7 +64,7 @@ define internal void @_ZL6escapeRi(ptr dereferenceable(4) %c) #3 !dbg !34 {
ret void
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!8, !9, !10}
diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index d8a1c07..5cff5d6 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -97,8 +97,8 @@ define i1 @foo() {
ret i1 %z
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare i64 @llvm.objectsize.i64(ptr, i1)
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index 9c9ba83..64091a9 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -178,14 +178,14 @@ define void @test4() {
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
define void @test5() {
; CHECK-LABEL: @test5(
; CHECK-NEXT: call void @baz(ptr nonnull byval(i8) @G)
; CHECK-NEXT: ret void
;
%A = alloca %T
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 @G, i64 124, i1 false)
call void @baz(ptr byval(i8) %A)
ret void
@@ -308,7 +308,7 @@ define float @test11(i64 %i) {
entry:
%a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 false)
%g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
%r = load float, ptr %g, align 4
@@ -320,7 +320,7 @@ define float @test11_volatile(i64 %i) {
; CHECK-LABEL: @test11_volatile(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca [4 x float], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @I, i64 16, i1 true)
; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds [4 x float], ptr [[A]], i64 0, i64 [[I:%.*]]
; CHECK-NEXT: [[R:%.*]] = load float, ptr [[G]], align 4
@@ -329,7 +329,7 @@ define float @test11_volatile(i64 %i) {
entry:
%a = alloca [4 x float], align 4
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a, ptr addrspace(1) align 4 @I, i64 16, i1 true)
%g = getelementptr inbounds [4 x float], ptr %a, i64 0, i64 %i
%r = load float, ptr %g, align 4
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index 86e586e..a4e247e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -236,12 +236,11 @@ define float @simple_recurrence_intrinsic_maximumnum(i32 %n, float %a, float %b)
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximumnum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximumnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -265,12 +264,11 @@ define float @simple_recurrence_intrinsic_minimumnum(i32 %n, float %a, float %b)
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimumnum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimumnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
@@ -296,7 +294,7 @@ define i8 @simple_recurrence_intrinsic_multiuse_phi(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: call void @use(i8 [[UMAX_ACC]])
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
+; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
index ccb9601..bd43daa 100644
--- a/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
+++ b/llvm/test/Transforms/InstCombine/sink_sideeffecting_instruction.ll
@@ -49,12 +49,12 @@ define i32 @test() {
; CHECK-NEXT: bb:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[VAR1:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR]])
; CHECK-NEXT: [[VAR4:%.*]] = icmp eq i32 [[VAR3]], 0
; CHECK-NEXT: br i1 [[VAR4]], label [[BB5:%.*]], label [[BB14:%.*]]
; CHECK: bb5:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR1]])
; CHECK-NEXT: [[VAR8:%.*]] = load i32, ptr [[VAR]], align 4
; CHECK-NEXT: [[VAR9:%.*]] = icmp eq i32 [[VAR8]], 0
; CHECK-NEXT: [[VAR7:%.*]] = call i32 @foo(ptr nonnull writeonly [[VAR1]])
@@ -66,23 +66,23 @@ define i32 @test() {
; CHECK-NEXT: br label [[BB12]]
; CHECK: bb12:
; CHECK-NEXT: [[VAR13:%.*]] = phi i32 [ [[VAR11]], [[BB10]] ], [ [[VAR7]], [[BB_CRIT_EDGE]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR1]])
; CHECK-NEXT: br label [[BB14]]
; CHECK: bb14:
; CHECK-NEXT: [[VAR15:%.*]] = phi i32 [ [[VAR13]], [[BB12]] ], [ 0, [[BB:%.*]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR15]]
;
bb:
%var = alloca i32, align 4
%var1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var) #4
%var3 = call i32 @foo(ptr nonnull writeonly %var)
%var4 = icmp eq i32 %var3, 0
br i1 %var4, label %bb5, label %bb14
bb5: ; preds = %bb
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %var1) #4
%var8 = load i32, ptr %var, align 4
%var9 = icmp eq i32 %var8, 0
%var7 = call i32 @foo(ptr nonnull writeonly %var1)
@@ -97,12 +97,12 @@ bb_crit_edge:
bb12: ; preds = %bb10, %bb5
%var13 = phi i32 [ %var11, %bb10 ], [ %var7, %bb_crit_edge ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var1) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %var1) #4
br label %bb14
bb14: ; preds = %bb12, %bb
%var15 = phi i32 [ %var13, %bb12 ], [ 0, %bb ]
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %var)
+ call void @llvm.lifetime.end.p0(ptr nonnull %var)
ret i32 %var15
}
@@ -325,18 +325,18 @@ define i32 @sink_lifetime1(i1 %c) {
; CHECK-LABEL: @sink_lifetime1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
; CHECK: use_block:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[VAR3]]
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
@@ -344,7 +344,7 @@ early_return:
ret i32 0
use_block:
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
ret i32 %var3
}
@@ -352,25 +352,25 @@ define i32 @sink_lifetime2(i1 %c) {
; CHECK-LABEL: @sink_lifetime2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[MERGE:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: merge:
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAR3]], [[USE_BLOCK]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: ret i32 [[RET]]
; CHECK: use_block:
; CHECK-NEXT: br label [[MERGE]]
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %merge, label %use_block
merge:
%ret = phi i32 [0, %entry], [%var3, %use_block]
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
ret i32 %ret
use_block:
@@ -390,8 +390,8 @@ define i32 @sink_lifetime3(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
; If unknown accesses %var, that's UB
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
br i1 %c, label %early_return, label %use_block
@@ -407,9 +407,9 @@ define i32 @sink_lifetime4a(i1 %c) {
; CHECK-LABEL: @sink_lifetime4a(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -418,9 +418,9 @@ define i32 @sink_lifetime4a(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -436,9 +436,9 @@ define i32 @sink_lifetime4b(i1 %c) {
; CHECK-LABEL: @sink_lifetime4b(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAR:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: [[VAR3:%.*]] = call i32 @unknown(ptr nonnull writeonly [[VAR]]) #[[ATTR1]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[VAR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VAR]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[EARLY_RETURN:%.*]], label [[USE_BLOCK:%.*]]
; CHECK: early_return:
; CHECK-NEXT: ret i32 0
@@ -447,9 +447,9 @@ define i32 @sink_lifetime4b(i1 %c) {
;
entry:
%var = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.start.p0(ptr %var)
%var3 = call i32 @unknown(ptr writeonly %var) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %var)
+ call void @llvm.lifetime.end.p0(ptr %var)
br i1 %c, label %early_return, label %use_block
early_return:
@@ -486,6 +486,6 @@ use_block:
declare i32 @bar()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
index 128edff..758071a 100644
--- a/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
+++ b/llvm/test/Transforms/InstCombine/trivial-dse-calls.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @unknown()
declare void @f(ptr)
@@ -25,9 +25,9 @@ define void @test_lifetime() {
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -40,11 +40,11 @@ define void @test_lifetime2() {
; CHECK-NEXT: ret void
;
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @unknown()
call void @f(ptr writeonly nocapture %a) argmemonly nounwind willreturn
call void @unknown()
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
index ab744c62..9c64bfb 100644
--- a/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
+++ b/llvm/test/Transforms/InstCombine/unreachable-alloca-lifetime-markers.ll
@@ -12,7 +12,7 @@ define void @pr150338(ptr %arg) {
%a = alloca i32
store ptr %a, ptr %arg
store i1 true, ptr poison
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -33,7 +33,7 @@ entry:
bb1:
%phi1 = phi ptr [ null, %entry ], [ %phi2, %bb2 ]
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br label %bb2
bb2:
@@ -45,7 +45,7 @@ define void @lifetime_poison() {
; CHECK-LABEL: define void @lifetime_poison() {
; CHECK-NEXT: ret void
;
- call void @llvm.lifetime.start.p0(i64 4, ptr poison)
- call void @llvm.lifetime.end.p0(i64 4, ptr poison)
+ call void @llvm.lifetime.start.p0(ptr poison)
+ call void @llvm.lifetime.end.p0(ptr poison)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/vararg.ll b/llvm/test/Transforms/InstCombine/vararg.ll
index eb24256..93d230d 100644
--- a/llvm/test/Transforms/InstCombine/vararg.ll
+++ b/llvm/test/Transforms/InstCombine/vararg.ll
@@ -12,14 +12,14 @@ define void @func(ptr nocapture readnone %fmt, ...) {
entry:
%va0 = alloca %struct.__va_list, align 8
%va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va0)
call void @llvm.va_start(ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va1)
call void @llvm.va_copy(ptr %va1, ptr %va0)
call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va1)
call void @llvm.va_end(ptr %va0)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va0)
ret void
}
@@ -31,28 +31,28 @@ define void @func_destroy_copy_src(ptr nocapture readnone %fmt, ...) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[VA0:%.*]] = alloca [[STRUCT___VA_LIST:%.*]], align 8
; CHECK-NEXT: [[VA1:%.*]] = alloca [[STRUCT___VA_LIST]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA0]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[VA1]])
; CHECK-NEXT: call void @llvm.va_start.p0(ptr nonnull [[VA0]])
; CHECK-NEXT: call void @llvm.va_copy.p0(ptr nonnull [[VA1]], ptr nonnull [[VA0]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA0]])
; CHECK-NEXT: call void @callee(ptr nonnull [[VA1]])
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[VA0]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[VA0]])
; CHECK-NEXT: ret void
;
entry:
%va0 = alloca %struct.__va_list, align 8
%va1 = alloca %struct.__va_list, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %va0)
- call void @llvm.lifetime.start.p0(i64 32, ptr %va1)
+ call void @llvm.lifetime.start.p0(ptr %va0)
+ call void @llvm.lifetime.start.p0(ptr %va1)
call void @llvm.va_start(ptr %va0)
call void @llvm.va_copy(ptr %va1, ptr %va0)
call void @llvm.va_end(ptr %va0)
call void @callee(ptr %va1)
call void @llvm.va_end(ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va1)
- call void @llvm.lifetime.end.p0(i64 32, ptr %va0)
+ call void @llvm.lifetime.end.p0(ptr %va1)
+ call void @llvm.lifetime.end.p0(ptr %va0)
ret void
}
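In the vararg hunks the dropped i64 32 duplicated information already present in the IR: it matched the size of %struct.__va_list (32 bytes in the AArch64-style va_list layout these tests appear to model; an assumption on our part), so removing it loses nothing. Sketch:

  %va = alloca %struct.__va_list, align 8
  ; before: call void @llvm.lifetime.start.p0(i64 32, ptr %va)   ; 32 == sizeof(%struct.__va_list)
  call void @llvm.lifetime.start.p0(ptr %va)                     ; after: size recovered from the alloca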
diff --git a/llvm/test/Transforms/LICM/dropped-tbaa.ll b/llvm/test/Transforms/LICM/dropped-tbaa.ll
index 11083b4..92839f1 100644
--- a/llvm/test/Transforms/LICM/dropped-tbaa.ll
+++ b/llvm/test/Transforms/LICM/dropped-tbaa.ll
@@ -24,7 +24,7 @@ define void @foo(ptr %data, ptr %addend) #0 {
; CHECK-NEXT: [[CONV_I:%.*]] = sitofp i32 [[TMP2]] to double
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !1
br i1 true, label %for.body.lr.ph, label %for.cond.cleanup
@@ -35,7 +35,7 @@ for.cond.for.cond.cleanup_crit_edge: ; preds = %for.inc
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.body.lr.ph, %for.inc
@@ -67,8 +67,8 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
attributes #0 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
index 61f0eb19..0aa56d2 100644
--- a/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
+++ b/llvm/test/Transforms/LICM/hoisting-preheader-debugloc.ll
@@ -17,7 +17,7 @@ declare i16 @e(i32)
define i16 @g() !dbg !13 {
entry:
%l_284 = alloca [2 x [3 x [6 x i32]]], align 16
- call void @llvm.lifetime.start.p0(i64 144, ptr nonnull %l_284), !dbg !24
+ call void @llvm.lifetime.start.p0(ptr nonnull %l_284), !dbg !24
call void @llvm.dbg.declare(metadata ptr %l_284, metadata !17, metadata !DIExpression()), !dbg !25
%0 = load i16, ptr @a, align 2, !dbg !26, !tbaa !29
%cmp11 = icmp sgt i16 %0, -1, !dbg !33
@@ -51,15 +51,15 @@ for.body.cleanup_crit_edge: ; preds = %for.body
br label %cleanup, !dbg !38
cleanup: ; preds = %for.body.cleanup_crit_edge, %for.cond.cleanup_crit_edge, %entry
- call void @llvm.lifetime.end.p0(i64 144, ptr nonnull %l_284), !dbg !51
+ call void @llvm.lifetime.end.p0(ptr nonnull %l_284), !dbg !51
ret i16 1, !dbg !51
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
declare void @llvm.dbg.value(metadata, metadata, metadata) #0
diff --git a/llvm/test/Transforms/LICM/loopsink-pr38462.ll b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
index 51eee1f..8b2ff10 100644
--- a/llvm/test/Transforms/LICM/loopsink-pr38462.ll
+++ b/llvm/test/Transforms/LICM/loopsink-pr38462.ll
@@ -37,7 +37,7 @@ __except:
catchret from %1 to label %__except3
__except3:
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
%call.i = call zeroext i1 @g(ptr nonnull %s)
br i1 %call.i, label %if.then.i, label %exit
@@ -46,7 +46,7 @@ if.then.i:
br label %exit
exit:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
br label %__try.cont
__try.cont:
@@ -58,8 +58,8 @@ __try.cont:
declare i32 @__C_specific_handler(...)
declare i32 @f()
declare zeroext i1 @g(ptr)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!1 = !{!"function_entry_count", i64 1}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
index c7a0de22..970643a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/lifetime-use.ll
@@ -6,7 +6,7 @@ define void @test(ptr %p, i64 %idx) {
; CHECK-SAME: ptr [[P:%.*]], i64 [[IDX:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [4 x [4 x i32]], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IDX]], 6
; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 48
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]]
@@ -31,12 +31,12 @@ define void @test(ptr %p, i64 %idx) {
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOCA]])
; CHECK-NEXT: ret void
;
entry:
%alloca = alloca [4 x [4 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
br label %loop
loop:
@@ -54,6 +54,6 @@ loop:
br i1 %exitcond.not, label %exit, label %loop
exit:
- call void @llvm.lifetime.end.p0(i64 64, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
index 3ec4fea..f8c5e82 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/lsr-comp-time.ll
@@ -5,10 +5,10 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone uwtable
define dso_local i32 @foo(i32 %arg, i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6) local_unnamed_addr #3 {
@@ -83,31 +83,31 @@ bb:
%tmp16 = alloca [100 x [100 x i32]], align 16
%tmp17 = alloca [100 x [100 x i32]], align 16
%tmp18 = alloca [100 x [100 x i32]], align 16
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp7) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp7, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp8) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp8, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp9) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp9, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp10) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp10, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp11) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp11, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp12) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp12, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp13) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp13, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp14) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp14, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 400, ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp15) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp15, i8 0, i64 400, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp16) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp16, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp17) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp17, i8 0, i64 40000, i1 false)
- call void @llvm.lifetime.start.p0(i64 40000, ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %tmp18) #4
call void @llvm.memset.p0.i64(ptr nonnull align 16 %tmp18, i8 0, i64 40000, i1 false)
%tmp32 = getelementptr inbounds [100 x i32], ptr %tmp8, i64 0, i64 3
br label %bb33
@@ -1300,19 +1300,19 @@ bb1051: ; preds = %bb1007
%tmp1063 = sub i32 %tmp1062, %tmp960
%tmp1064 = add i32 %tmp1063, %tmp1004
%tmp1065 = sub i32 %tmp1064, %tmp1048
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp18) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp17) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp16) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp15) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp14) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp13) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp12) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp11) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp10) #4
- call void @llvm.lifetime.end.p0(i64 40000, ptr nonnull %tmp9) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp8) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp7) #4
- call void @llvm.lifetime.end.p0(i64 400, ptr nonnull %tmp) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp18) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp17) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp16) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp15) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp14) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp13) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp12) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp11) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp10) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp9) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp8) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp7) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %tmp) #4
ret i32 %tmp1065
}
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
index f4102ff..fe3504b 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-known-trip-count.ll
@@ -239,8 +239,8 @@ for.body: ; preds = %entry, %for.body
define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 {
%2 = alloca [101 x i32], align 4
%3 = alloca [21 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 404, ptr nonnull %2)
- call void @llvm.lifetime.start.p0(i64 84, ptr nonnull %3)
+ call void @llvm.lifetime.start.p0(ptr nonnull %2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %3)
%4 = icmp sgt i32 %0, 0
br i1 %4, label %5, label %159
@@ -433,8 +433,8 @@ define dso_local i32 @predicated_test(i32 noundef %0, ptr %glob) #0 {
br label %159
159: ; preds = %158, %1
- call void @llvm.lifetime.end.p0(i64 84, ptr nonnull %3)
- call void @llvm.lifetime.end.p0(i64 404, ptr nonnull %2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %2)
ret i32 0
}
@@ -472,7 +472,7 @@ while.end: ; preds = %while.end.loopexit,
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
attributes #0 = { "target-features"="+mve.fp" }
diff --git a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
index 1ac556a..26bab4d 100644
--- a/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
+++ b/llvm/test/Transforms/LoopVectorize/Hexagon/minimum-vf.ll
@@ -11,14 +11,14 @@ target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i
@g0 = external dso_local local_unnamed_addr global ptr, align 4
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
; Function Attrs: nounwind
define hidden fastcc void @f0(ptr nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i8 zeroext %a5) unnamed_addr #1 {
b0:
%v0 = alloca [4 x [9 x i16]], align 8
- call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %v0) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %v0) #2
%v2 = add i32 %a1, -2
%v3 = add i32 %a3, -9
%v4 = icmp ugt i32 %v2, %v3
@@ -147,7 +147,7 @@ b1: ; preds = %b1, %b0
br i1 %v120, label %b2, label %b1
b2: ; preds = %b1, %b0
- call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %v0) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %v0) #2
ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
index 1bacb57..6b72f20 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-only-for-real.ll
@@ -10,11 +10,11 @@ define zeroext i32 @test() #0 {
entry:
%a = alloca [1600 x i32], align 4
%c = alloca [1600 x i32], align 4
- call void @llvm.lifetime.start(i64 6400, ptr %a) #3
+ call void @llvm.lifetime.start(ptr %a) #3
br label %for.body
for.cond.cleanup: ; preds = %for.body
- call void @llvm.lifetime.start(i64 6400, ptr %c) #3
+ call void @llvm.lifetime.start(ptr %c) #3
%call = call signext i32 @bar(ptr %a, ptr %c) #3
br label %for.body6
@@ -28,8 +28,8 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond27, label %for.cond.cleanup, label %for.body
for.cond.cleanup5: ; preds = %for.body6
- call void @llvm.lifetime.end(i64 6400, ptr nonnull %c) #3
- call void @llvm.lifetime.end(i64 6400, ptr %a) #3
+ call void @llvm.lifetime.end(ptr nonnull %c) #3
+ call void @llvm.lifetime.end(ptr %a) #3
ret i32 %add
for.body6: ; preds = %for.body6, %for.cond.cleanup
@@ -44,10 +44,10 @@ for.body6: ; preds = %for.body6, %for.con
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end(ptr nocapture) #1
declare signext i32 @bar(ptr, ptr) #2
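Note that this PowerPC test uses the legacy unsuffixed spelling @llvm.lifetime.start / @llvm.lifetime.end rather than the .p0 overloads, and the update is the same: the size operand disappears from both the declarations and the call sites, as in this fragment lifted from the hunk above:

declare void @llvm.lifetime.start(ptr nocapture) #1
declare void @llvm.lifetime.end(ptr nocapture) #1
  ; call sites likewise shrink to a single pointer operand:
  call void @llvm.lifetime.start(ptr %a) #3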
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index 162440a..be6e918 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN
@@ -25,37 +26,39 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFBFMIN-LABEL: define void @fadd(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
-; ZVFBFMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]]
-; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFBFMIN: [[VECTOR_PH]]:
; ZVFBFMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
-; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; ZVFBFMIN-NEXT: [[TMP3:%.*]] = sub i64 [[TMP10]], 1
+; ZVFBFMIN-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP3]]
+; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; ZVFBFMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8
; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFBFMIN: [[VECTOR_BODY]]:
-; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFBFMIN-NEXT: [[TMP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP0]]
; ZVFBFMIN-NEXT: [[TMP2:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP0]]
-; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP1]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP2]], align 2
+; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; ZVFBFMIN-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x bfloat> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; ZVFBFMIN-NEXT: store <vscale x 8 x bfloat> [[TMP11]], ptr [[TMP1]], align 2
-; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], [[TMP5]]
-; ZVFBFMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFBFMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[TMP0]]
+; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; ZVFBFMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFBFMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
-; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
; ZVFBFMIN: [[SCALAR_PH]]:
-; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
; ZVFBFMIN-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
@@ -64,7 +67,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFBFMIN-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
@@ -137,41 +140,43 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
-; ZVFBFMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFBFMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFBFMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; ZVFBFMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFBFMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFBFMIN: [[VECTOR_PH]]:
; ZVFBFMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; ZVFBFMIN-NEXT: [[TMP10:%.*]] = sub i64 [[TMP3]], 1
+; ZVFBFMIN-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP10]]
+; ZVFBFMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; ZVFBFMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; ZVFBFMIN-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; ZVFBFMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; ZVFBFMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFBFMIN: [[VECTOR_BODY]]:
-; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP6:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFBFMIN-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; ZVFBFMIN-NEXT: [[TMP7:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[TMP6]]
; ZVFBFMIN-NEXT: [[TMP8:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[TMP6]]
; ZVFBFMIN-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[C]], i64 [[TMP6]]
-; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP7]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x bfloat>, ptr [[TMP8]], align 2
-; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
+; ZVFBFMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x bfloat> @llvm.vp.load.nxv4bf16.p0(ptr align 2 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[WIDE_LOAD2:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; ZVFBFMIN-NEXT: [[TMP13:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD]] to <vscale x 4 x float>
; ZVFBFMIN-NEXT: [[TMP14:%.*]] = fpext <vscale x 4 x bfloat> [[WIDE_LOAD1]] to <vscale x 4 x float>
; ZVFBFMIN-NEXT: [[TMP15:%.*]] = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[TMP13]], <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[WIDE_LOAD2]])
-; ZVFBFMIN-NEXT: store <vscale x 4 x float> [[TMP15]], ptr [[TMP9]], align 4
-; ZVFBFMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP6]], [[TMP5]]
-; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; ZVFBFMIN-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP15]], ptr align 4 [[TMP9]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; ZVFBFMIN-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+; ZVFBFMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[TMP6]]
+; ZVFBFMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; ZVFBFMIN-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFBFMIN-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; ZVFBFMIN: [[MIDDLE_BLOCK]]:
-; ZVFBFMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFBFMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFBFMIN-NEXT: br label %[[EXIT:.*]]
; ZVFBFMIN: [[SCALAR_PH]]:
-; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFBFMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFBFMIN-NEXT: br label %[[LOOP:.*]]
; ZVFBFMIN: [[LOOP]]:
-; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFBFMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFBFMIN-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
; ZVFBFMIN-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
; ZVFBFMIN-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
@@ -184,7 +189,7 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; ZVFBFMIN-NEXT: store float [[FMULADD]], ptr [[C_GEP]], align 4
; ZVFBFMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFBFMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; ZVFBFMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; ZVFBFMIN: [[EXIT]]:
; ZVFBFMIN-NEXT: ret void
;
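The RISC-V hunks above switch the expected code from a fixed-step scalable vector loop to EVL-based tail folding, and a new RUN line pins the old behavior under -prefer-predicate-over-epilogue=scalar-epilogue. The recurring shape of the new loop, distilled from the CHECK lines with names shortened for readability:

vector.body:
  %iv  = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
  %avl = phi i64 [ %n, %vector.ph ], [ %avl.next, %vector.body ]   ; elements left to process
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 8, i1 true)
  %p   = getelementptr bfloat, ptr %a, i64 %iv
  %v   = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 2 %p, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  call void @llvm.vp.store.nxv8bf16.p0(<vscale x 8 x bfloat> %v, ptr align 2 %p, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  %evl.z    = zext i32 %evl to i64
  %iv.next  = add i64 %evl.z, %iv
  %avl.next = sub nuw i64 %avl, %evl.z                             ; AVL shrinks by EVL each iteration
  %done     = icmp eq i64 %iv.next, %n
  br i1 %done, label %middle.block, label %vector.body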
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
index 75ae6df..9f7ac7a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
@@ -62,50 +62,10 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N,
; CHECK-LABEL: define i32 @any_of_reduction_used_in_blend_with_multiple_phis(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]], i1 [[C_0:%.*]], i1 [[C_1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_0]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
-; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i1> zeroinitializer
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[SRC]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x ptr> @llvm.masked.gather.nxv2p0.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x ptr> poison)
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x ptr> [[WIDE_MASKED_GATHER]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 2 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv2i1(<vscale x 2 x i1> [[PREDPHI]])
-; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 0, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[ANY_OF_RED:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ANY_OF_RED_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: br i1 [[C_0]], label %[[X_1:.*]], label %[[ELSE_1:.*]]
; CHECK: [[ELSE_1]]:
; CHECK-NEXT: br i1 [[C_1]], label %[[X_1]], label %[[ELSE_2:.*]]
@@ -121,9 +81,9 @@ define i32 @any_of_reduction_used_in_blend_with_multiple_phis(ptr %src, i64 %N,
; CHECK-NEXT: [[ANY_OF_RED_NEXT]] = phi i32 [ [[P]], %[[X_1]] ], [ [[SEL]], %[[ELSE_2]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]]
; CHECK: [[EXIT]]:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[ANY_OF_RED_NEXT]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: ret i32 [[RES]]
;
entry:
@@ -159,9 +119,3 @@ exit:
}
attributes #0 = { "target-cpu"="sifive-p670" }
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
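Here the regenerated checks delete the entire <vscale x 2 x i1> vector body and middle block: under the new tail-folding configuration the vectorizer evidently no longer emits a vector loop for this any-of reduction (our reading of the diff; the patch itself does not state the reason), so the trailing loop-metadata checks are dropped as well. For reference, the deleted [[LOOP]] patterns matched metadata of this shape:

  br i1 %ec, label %exit, label %loop.header, !llvm.loop !0

!0 = distinct !{!0, !1, !2}
!1 = !{!"llvm.loop.isvectorized", i32 1}
!2 = !{!"llvm.loop.unroll.runtime.disable"}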
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index aad9128..3a77d2e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -11,45 +11,46 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -62,7 +63,7 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP25]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -96,45 +97,46 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-LABEL: define void @block_with_dead_inst_2(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 333, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -147,7 +149,7 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -181,45 +183,46 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_insts_3(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 333, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -235,7 +238,7 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -276,45 +279,46 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -332,7 +336,7 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -373,45 +377,46 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-LABEL: define void @multiple_blocks_with_dead_inst_multiple_successors_5(
; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 333, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 333, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 333, [[TMP5]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 333, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP6]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP5]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16> zeroinitializer, <vscale x 4 x ptr> [[TMP17]], i32 2, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 333, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP13:%.*]] = mul i64 3, [[TMP7]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 333
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -431,7 +436,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -478,45 +483,62 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -3
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i64 [[TMP6]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP8]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[IC]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = xor <vscale x 8 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP11]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP10]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP27]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 3, [[TMP12]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP16]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 8 x i32> [[TMP14]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP20]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 8 x i16> @llvm.vp.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]])
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_MASKED_GATHER]], zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[TMP8]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = xor <vscale x 8 x i1> [[TMP17]], splat (i1 true)
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
+; CHECK-NEXT: [[TMP23:%.*]] = select <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> [[BROADCAST_SPLAT]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = or <vscale x 8 x i1> [[TMP22]], [[TMP23]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP20]], <vscale x 8 x i1> [[TMP24]], i32 [[TMP27]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i16 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i16 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -536,7 +558,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
; CHECK-NEXT: store i16 [[XOR]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -580,38 +602,45 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_1(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> splat (i16 99), <vscale x 8 x i16> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <vscale x 8 x i16> [[VP_OP_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP15]], <vscale x 8 x i16> [[VP_OP_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -624,7 +653,7 @@ define void @empty_block_with_phi_1(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -658,38 +687,45 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-LABEL: define void @empty_block_with_phi_2(
; CHECK-SAME: ptr [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP13]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult <vscale x 8 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP10]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 8 x i16> [[WIDE_LOAD]], zeroinitializer
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
-; CHECK-NEXT: store <vscale x 8 x i16> [[PREDPHI]], ptr [[TMP10]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x i1> [[TMP12]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i16> [[WIDE_LOAD]], <vscale x 8 x i16> splat (i16 99)
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> [[PREDPHI]], ptr align 2 [[TMP10]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[TMP9]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 0, %[[MIDDLE_BLOCK]] ], [ 1, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 1, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
-; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[XOR1315:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[LOOP_LATCH]] ]
; CHECK-NEXT: [[XOR]] = xor i32 0, 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
@@ -702,7 +738,7 @@ define void @empty_block_with_phi_2(ptr %src, i64 %N) #0 {
; CHECK-NEXT: store i16 [[P]], ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -743,11 +779,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[UMIN7]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umax.i64(i64 40, i64 [[TMP5]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], [[TMP6]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[N_EXT]], i64 1)
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[N_EXT]], [[UMIN]]
@@ -771,33 +803,38 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP14]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], [[TMP15]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP14]], 4
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[TMP19]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP3]], [[TMP20]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP19]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP16]], 2
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP18]], splat (i64 3)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP20]]
-; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP23]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP16]], 4
+; CHECK-NEXT: [[TMP24:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP25:%.*]] = mul <vscale x 4 x i64> [[TMP24]], splat (i64 3)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP25]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> [[TMP24]], i32 4, <vscale x 2 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META21:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP3]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[TMP23:%.*]] = mul i64 3, [[TMP17]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP23]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]]), !alias.scope [[META19:![0-9]+]], !noalias [[META22:![0-9]+]]
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP3]]
+; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
@@ -813,7 +850,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N_EXT]]
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -845,30 +882,31 @@ exit:
attributes #0 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
-; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
-; CHECK: [[META18]] = !{[[META19:![0-9]+]]}
-; CHECK: [[META19]] = distinct !{[[META19]], [[META20:![0-9]+]]}
-; CHECK: [[META20]] = distinct !{[[META20]], !"LVerDomain"}
-; CHECK: [[META21]] = !{[[META22:![0-9]+]], [[META23:![0-9]+]]}
-; CHECK: [[META22]] = distinct !{[[META22]], [[META20]]}
-; CHECK: [[META23]] = distinct !{[[META23]], [[META20]]}
-; CHECK: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
-; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META3]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META3]], [[META1]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META3]], [[META1]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META3]], [[META1]]}
+; CHECK: [[META19]] = !{[[META20:![0-9]+]]}
+; CHECK: [[META20]] = distinct !{[[META20]], [[META21:![0-9]+]]}
+; CHECK: [[META21]] = distinct !{[[META21]], !"LVerDomain"}
+; CHECK: [[META22]] = !{[[META23:![0-9]+]], [[META24:![0-9]+]]}
+; CHECK: [[META23]] = distinct !{[[META23]], [[META21]]}
+; CHECK: [[META24]] = distinct !{[[META24]], [[META21]]}
+; CHECK: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index ab8875b..18757a7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -18,52 +18,52 @@ define void @dead_load(ptr %p, i16 %start) {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP9]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP11]]
+; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP5]], [[TMP10]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 8
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[START_EXT]], [[TMP18]]
; CHECK-NEXT: [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[START_EXT]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = mul <vscale x 8 x i64> [[TMP15]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP14]]
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[TMP20:%.*]] = mul i64 3, [[TMP19]]
+; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP20]], i64 0
+; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT1]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[P]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP21]], i32 2, <vscale x 8 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP14]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP21]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP16]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT2]]
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[START_EXT]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[START_EXT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 0, ptr [[GEP]], align 2
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV]], 111
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -94,7 +94,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 8, i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.umax.i32(i32 6, i32 [[TMP1]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 252, [[TMP2]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -126,11 +126,11 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i32> [[VEC_IND]] to <vscale x 4 x i64>
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[DST]], <vscale x 4 x i64> [[TMP15]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> [[TMP16]], i32 1, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META5:![0-9]+]], !noalias [[META8:![0-9]+]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -145,7 +145,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
; CHECK-NEXT: store i8 0, ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV]], 1001
-; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[L]], %[[LOOP]] ]
; CHECK-NEXT: ret i8 [[R]]
@@ -181,7 +181,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 770, [[UMAX3]]
; CHECK-NEXT: [[SMAX4:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[SMAX4]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 24
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 [[TMP2]], 19
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 1
@@ -210,48 +210,48 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
; CHECK: [[PRED_STORE_IF]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11:![0-9]+]], !noalias [[META14:![0-9]+]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12:![0-9]+]], !noalias [[META15:![0-9]+]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
; CHECK: [[PRED_STORE_CONTINUE]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
; CHECK: [[PRED_STORE_IF5]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE6]]
; CHECK: [[PRED_STORE_CONTINUE6]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
; CHECK: [[PRED_STORE_IF7]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE8]]
; CHECK: [[PRED_STORE_CONTINUE8]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
; CHECK: [[PRED_STORE_IF9]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE10]]
; CHECK: [[PRED_STORE_CONTINUE10]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
; CHECK: [[PRED_STORE_IF11]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE12]]
; CHECK: [[PRED_STORE_CONTINUE12]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
; CHECK: [[PRED_STORE_IF13]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE14]]
; CHECK: [[PRED_STORE_CONTINUE14]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
; CHECK: [[PRED_STORE_IF15]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE16]]
; CHECK: [[PRED_STORE_CONTINUE16]]:
; CHECK-NEXT: br i1 [[C]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18]]
; CHECK: [[PRED_STORE_IF17]]:
-; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META11]], !noalias [[META14]]
+; CHECK-NEXT: store i1 false, ptr [[A]], align 1, !alias.scope [[META12]], !noalias [[META15]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE18]]
; CHECK: [[PRED_STORE_CONTINUE18]]:
-; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META14]]
+; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> zeroinitializer, ptr [[TMP11]], i32 4, <8 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META15]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -273,7 +273,7 @@ define i32 @cost_of_exit_branch_and_cond_insts(ptr %a, ptr %b, i1 %c, i16 %x) #0
; CHECK-NEXT: [[EC:%.*]] = icmp slt i32 [[IV]], [[SUB]]
; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_LATCH]], label %[[EXIT:.*]]
; CHECK: [[LOOP_LATCH]]:
-; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br label %[[LOOP_HEADER]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: br label %[[RETURN:.*]]
; CHECK: [[RETURN]]:
@@ -315,45 +315,55 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-LABEL: define void @test_phi_in_latch_redundant(
; CHECK-SAME: ptr [[DST:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 37, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 37, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 37, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 37, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[A]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 9
-; CHECK-NEXT: [[TMP10:%.*]] = xor <vscale x 2 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
-; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 9)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
-; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 2 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> [[TMP10]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 37, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP8]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 9, [[TMP5]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP11]], [[BROADCAST_SPLAT4]]
+; CHECK-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = select <vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = or <vscale x 4 x i1> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[TMP19]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> [[TMP15]], i32 [[TMP8]])
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP17]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 37
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 37, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: br i1 false, label %[[LOOP_LATCH]], label %[[THEN:.*]]
; CHECK: [[THEN]]:
; CHECK-NEXT: [[NOT_A:%.*]] = xor i32 [[A]], -1
@@ -364,7 +374,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: store i32 [[P]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 9
; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], 322
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP_HEADER]], label %[[EXIT]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -400,49 +410,56 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP6]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], [[TMP15]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[N_VEC]], 2
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP8]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i8>, ptr [[TMP15]], align 1
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP2]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP10]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP16]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP13]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[EVL_BASED_IV]], 2
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP22]], i32 1, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i8> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[TMP17]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP18]], <vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP2]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP13]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L_DEAD:%.*]] = load i8, ptr [[GEP_SRC_0]], align 1
; CHECK-NEXT: [[IV_1:%.*]] = add i64 [[IV]], 1
@@ -453,7 +470,7 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -481,26 +498,27 @@ exit:
attributes #0 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[META4]] = !{[[META5:![0-9]+]]}
-; CHECK: [[META5]] = distinct !{[[META5]], [[META6:![0-9]+]]}
-; CHECK: [[META6]] = distinct !{[[META6]], !"LVerDomain"}
-; CHECK: [[META7]] = !{[[META8:![0-9]+]]}
-; CHECK: [[META8]] = distinct !{[[META8]], [[META6]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]]}
-; CHECK: [[META11]] = !{[[META12:![0-9]+]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]}
-; CHECK: [[META13]] = distinct !{[[META13]], !"LVerDomain"}
-; CHECK: [[META14]] = !{[[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]], [[META13]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
-; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META2]], [[META1]]}
-; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
-; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[META5]] = !{[[META6:![0-9]+]]}
+; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]}
+; CHECK: [[META7]] = distinct !{[[META7]], !"LVerDomain"}
+; CHECK: [[META8]] = !{[[META9:![0-9]+]]}
+; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META3]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
+; CHECK: [[META12]] = !{[[META13:![0-9]+]]}
+; CHECK: [[META13]] = distinct !{[[META13]], [[META14:![0-9]+]]}
+; CHECK: [[META14]] = distinct !{[[META14]], !"LVerDomain"}
+; CHECK: [[META15]] = !{[[META16:![0-9]+]]}
+; CHECK: [[META16]] = distinct !{[[META16]], [[META14]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META3]]}
+; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]]}
+; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META3]], [[META1]]}
+; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP22]] = distinct !{[[LOOP22]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
index db3215a6..7e48926 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/defaults.ll
@@ -13,15 +13,14 @@ target triple = "riscv64"
define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -29,28 +28,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -74,44 +76,47 @@ for.end:
define i64 @vector_add_reduce(ptr noalias nocapture %a) {
; CHECK-LABEL: @vector_add_reduce(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[VP_OP_LOAD]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP8]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
index f02e5de..5d92081 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
@@ -10,15 +10,14 @@ target triple = "riscv64"
define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_udiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -26,28 +25,31 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = udiv i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -61,14 +63,10 @@ define void @vector_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FIXED: middle.block:
@@ -108,15 +106,14 @@ for.end:
define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_sdiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -124,28 +121,31 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = sdiv i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -159,14 +159,10 @@ define void @vector_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; FIXED: middle.block:
@@ -206,15 +202,14 @@ for.end:
define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_urem(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -222,28 +217,31 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = urem i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -257,14 +255,10 @@ define void @vector_urem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = urem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = urem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; FIXED: middle.block:
@@ -304,15 +298,14 @@ for.end:
define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_srem(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -320,28 +313,31 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP9:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP9]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP9]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[DIVREM:%.*]] = srem i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[DIVREM]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -355,14 +351,10 @@ define void @vector_srem(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = srem <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = srem <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; FIXED-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; FIXED: middle.block:
@@ -402,40 +394,47 @@ for.end:
define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @predicated_udiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
@@ -448,7 +447,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -464,16 +463,11 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; FIXED-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; FIXED: middle.block:
@@ -525,40 +519,47 @@ for.end:
define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @predicated_sdiv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP7]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP8]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP16]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[V]], 0
@@ -571,7 +572,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -587,16 +588,11 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
; FIXED-NEXT: [[TMP8:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP2]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP4]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP2]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; FIXED: middle.block:
@@ -648,37 +644,44 @@ for.end:
define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_udiv_by_constant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27)
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
@@ -691,7 +694,7 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -703,18 +706,12 @@ define void @predicated_udiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42)
-; FIXED-NEXT: [[TMP6:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], splat (i64 27)
; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27)
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; FIXED: middle.block:
@@ -766,37 +763,44 @@ for.end:
define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_sdiv_by_constant(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP14]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 27)
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = select <vscale x 2 x i1> [[TMP15]], <vscale x 2 x i1> [[TMP9]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]], <vscale x 2 x i64> [[TMP10]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP12]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[ELEM]], 42
@@ -809,7 +813,7 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -821,18 +825,12 @@ define void @predicated_sdiv_by_constant(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 4
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD]], splat (i64 42)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <4 x i64> [[WIDE_LOAD1]], splat (i64 42)
-; FIXED-NEXT: [[TMP6:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], splat (i64 27)
; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], splat (i64 27)
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP4]], <4 x i64> [[TMP6]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP5]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP1]], align 8
-; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP3]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[PREDPHI2]], ptr [[TMP1]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; FIXED: middle.block:
@@ -884,38 +882,45 @@ for.end:
define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; CHECK-LABEL: @predicated_sdiv_by_minus_one(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP12]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; CHECK-NEXT: [[TMP15:%.*]] = icmp ult <vscale x 16 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]])
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 -128)
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1)
+; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 16 x i1> [[TMP15]], <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i8> splat (i8 1)
; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], [[TMP10]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP9]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]]
-; CHECK-NEXT: store <vscale x 16 x i8> [[PREDPHI]], ptr [[TMP7]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 16 x i1> [[TMP16]], <vscale x 16 x i8> [[TMP11]], <vscale x 16 x i8> [[WIDE_LOAD]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[PREDPHI]], ptr align 1 [[TMP7]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ELEM]], -128
@@ -928,7 +933,7 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; CHECK-NEXT: store i8 [[PHI]], ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -940,20 +945,13 @@ define void @predicated_sdiv_by_minus_one(ptr noalias nocapture %a, i64 %n) {
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; FIXED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 32
-; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1
-; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP3]], align 1
-; FIXED-NEXT: [[TMP4:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD]], splat (i8 -128)
+; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP1]], align 1
; FIXED-NEXT: [[TMP5:%.*]] = icmp ne <32 x i8> [[WIDE_LOAD1]], splat (i8 -128)
-; FIXED-NEXT: [[TMP6:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1)
; FIXED-NEXT: [[TMP7:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> splat (i8 -1), <32 x i8> splat (i8 1)
-; FIXED-NEXT: [[TMP8:%.*]] = sdiv <32 x i8> [[WIDE_LOAD]], [[TMP6]]
; FIXED-NEXT: [[TMP9:%.*]] = sdiv <32 x i8> [[WIDE_LOAD1]], [[TMP7]]
-; FIXED-NEXT: [[PREDPHI:%.*]] = select <32 x i1> [[TMP4]], <32 x i8> [[TMP8]], <32 x i8> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <32 x i1> [[TMP5]], <32 x i8> [[TMP9]], <32 x i8> [[WIDE_LOAD1]]
-; FIXED-NEXT: store <32 x i8> [[PREDPHI]], ptr [[TMP1]], align 1
-; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP3]], align 1
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; FIXED-NEXT: store <32 x i8> [[PREDPHI2]], ptr [[TMP1]], align 1
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; FIXED: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index effaf57..22a5a0c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN
@@ -25,37 +26,39 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN-LABEL: define void @fadd(
; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
-; ZVFHMIN-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 8
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP8]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; ZVFHMIN-NEXT: [[TMP3:%.*]] = sub i64 [[TMP10]], 1
+; ZVFHMIN-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP3]]
+; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP12]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP1:%.*]] = getelementptr half, ptr [[A]], i64 [[INDEX]]
; ZVFHMIN-NEXT: [[TMP2:%.*]] = getelementptr half, ptr [[B]], i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP1]], align 2
-; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFHMIN-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; ZVFHMIN-NEXT: [[TMP11:%.*]] = fadd <vscale x 8 x half> [[WIDE_LOAD]], [[WIDE_LOAD1]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP11]], ptr [[TMP1]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; ZVFHMIN-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP11]], ptr align 2 [[TMP1]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; ZVFHMIN-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; ZVFHMIN-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; ZVFHMIN-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; ZVFHMIN-NEXT: br label %[[LOOP:.*]]
; ZVFHMIN: [[LOOP]]:
-; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; ZVFHMIN-NEXT: [[I:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
; ZVFHMIN-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
; ZVFHMIN-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
; ZVFHMIN-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
@@ -64,7 +67,7 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
; ZVFHMIN-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; ZVFHMIN-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
index c9ba2af..8657e2f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/fminimumnum.ll
@@ -12,7 +12,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -75,7 +75,7 @@ define void @fmin32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -161,7 +161,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 4
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -224,7 +224,7 @@ define void @fmax32(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -310,7 +310,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -373,7 +373,7 @@ define void @fmin64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -459,7 +459,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP8]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP13]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -522,7 +522,7 @@ define void @fmax64(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 15, i64 [[TMP1]])
; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
@@ -606,11 +606,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
@@ -625,28 +621,33 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP10]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 4096, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -659,7 +660,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -669,11 +670,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
@@ -688,28 +685,33 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; ZVFHMIN-NEXT: [[TMP14:%.*]] = sub i64 [[TMP10]], 1
+; ZVFHMIN-NEXT: [[N_RND_UP:%.*]] = add i64 4096, [[TMP14]]
+; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.minimumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
+; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -722,7 +724,7 @@ define void @fmin16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
-; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
@@ -755,11 +757,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; CHECK-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; CHECK-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP13]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP14]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 8
@@ -774,28 +772,33 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[TMP10]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 4096, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP2]], align 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP2]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP4]], align 2
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP7]], align 2
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -808,7 +811,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; CHECK-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 4096
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -818,11 +821,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: [[INPUT23:%.*]] = ptrtoint ptr [[INPUT2]] to i64
; ZVFHMIN-NEXT: [[INPUT12:%.*]] = ptrtoint ptr [[INPUT1]] to i64
; ZVFHMIN-NEXT: [[OUTPUT1:%.*]] = ptrtoint ptr [[OUTPUT]] to i64
-; ZVFHMIN-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; ZVFHMIN-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; ZVFHMIN-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; ZVFHMIN-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 4096, [[TMP2]]
-; ZVFHMIN-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; ZVFHMIN-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; ZVFHMIN: [[VECTOR_MEMCHECK]]:
; ZVFHMIN-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
@@ -837,28 +836,33 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN: [[VECTOR_PH]]:
; ZVFHMIN-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 8
-; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 4096, [[TMP10]]
-; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 4096, [[N_MOD_VF]]
+; ZVFHMIN-NEXT: [[TMP14:%.*]] = sub i64 [[TMP10]], 1
+; ZVFHMIN-NEXT: [[N_RND_UP:%.*]] = add i64 4096, [[TMP14]]
+; ZVFHMIN-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP10]]
+; ZVFHMIN-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; ZVFHMIN-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; ZVFHMIN-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 8
; ZVFHMIN-NEXT: br label %[[VECTOR_BODY:.*]]
; ZVFHMIN: [[VECTOR_BODY]]:
-; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[AVL:%.*]] = phi i64 [ 4096, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; ZVFHMIN-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; ZVFHMIN-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT1]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP13]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[INPUT2]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 8 x half>, ptr [[TMP15]], align 2
+; ZVFHMIN-NEXT: [[WIDE_LOAD5:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 2 [[TMP15]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
; ZVFHMIN-NEXT: [[TMP17:%.*]] = call <vscale x 8 x half> @llvm.maximumnum.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD5]])
; ZVFHMIN-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [4096 x half], ptr [[OUTPUT]], i64 0, i64 [[INDEX]]
-; ZVFHMIN-NEXT: store <vscale x 8 x half> [[TMP17]], ptr [[TMP18]], align 2
-; ZVFHMIN-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; ZVFHMIN-NEXT: call void @llvm.vp.store.nxv8f16.p0(<vscale x 8 x half> [[TMP17]], ptr align 2 [[TMP18]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP19]])
+; ZVFHMIN-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; ZVFHMIN-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; ZVFHMIN-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; ZVFHMIN-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 4096
+; ZVFHMIN-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; ZVFHMIN: [[MIDDLE_BLOCK]]:
-; ZVFHMIN-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, [[N_VEC]]
-; ZVFHMIN-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; ZVFHMIN-NEXT: br label %[[EXIT:.*]]
; ZVFHMIN: [[SCALAR_PH]]:
-; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; ZVFHMIN-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; ZVFHMIN-NEXT: br label %[[FOR_BODY:.*]]
; ZVFHMIN: [[FOR_BODY]]:
; ZVFHMIN-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
@@ -871,7 +875,7 @@ define void @fmax16(ptr noundef readonly captures(none) %input1, ptr noundef rea
; ZVFHMIN-NEXT: store half [[OUT]], ptr [[ARRAYIDX4]], align 2
; ZVFHMIN-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ZVFHMIN-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 4096
-; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; ZVFHMIN-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
@@ -907,10 +911,11 @@ declare half @llvm.maximumnum.f16(half, half)
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]}
+; CHECK: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]}
;.
; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -922,8 +927,9 @@ declare half @llvm.maximumnum.f16(half, half)
; ZVFHMIN: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
; ZVFHMIN: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
; ZVFHMIN: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
-; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; ZVFHMIN: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
-; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
-; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; ZVFHMIN: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META11:![0-9]+]], [[META2]]}
+; ZVFHMIN: [[META11]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; ZVFHMIN: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
+; ZVFHMIN: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META11]], [[META2]]}
+; ZVFHMIN: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
index 1ea70b6d..ae18c63 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/force-vect-msg.ll
@@ -3,8 +3,8 @@
; CHECK: LV: Loop hints: force=enabled
; CHECK: LV: Scalar loop costs: 4.
-; ChosenFactor.Cost is 4, but the real cost will be divided by the width, which is 2.
-; CHECK: Cost for VF 2: 4 (Estimated cost per lane: 2.0)
+; ChosenFactor.Cost is 9, but the real cost is divided by the estimated width, giving 2.2 per lane.
+; CHECK: Cost for VF vscale x 2: 9 (Estimated cost per lane: 2.2)
; Regardless of whether vectorization is forced, this loop will eventually be vectorized because of the cost model.
; Therefore, the following message does not need to be printed even if vectorization is explicitly forced in the metadata.
; CHECK-NOT: LV: Vectorization seems to be not beneficial, but was forced by a user.
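Aside on the updated check line above: the per-lane figure is the reported cost divided by an estimated runtime width, not by the known-minimum VF. A minimal sketch of that arithmetic, assuming a vscale-for-tuning of 2 (i.e. a 128-bit minimum VLEN — an assumption for illustration, not something stated in this test):

% Sketch only: vscale = 2 is an assumed tuning value.
\[
\text{width}_{\text{est}} = \underbrace{2}_{\text{min VF}} \times \underbrace{2}_{\text{vscale}} = 4,
\qquad
\frac{9}{\text{width}_{\text{est}}} = \frac{9}{4} = 2.25 \approx 2.2 .
\]

The debug printer appears to emit this ratio with one decimal place, so 2.25 is displayed as 2.2, matching the new CHECK line.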
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index e6825fa..60ea48e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -20,11 +20,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[TMP3:%.*]] = udiv i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[UMIN21]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 128, i64 [[TMP7]])
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
@@ -59,40 +55,40 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP45:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP5]], [[TMP46]]
-; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP48:%.*]] = select i1 [[TMP47]], i64 [[TMP46]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP5]], [[TMP48]]
+; CHECK-NEXT: [[TMP22:%.*]] = sub i64 [[TMP46]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP5]], [[TMP22]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP46]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP52:%.*]] = mul nuw i64 [[TMP51]], 8
-; CHECK-NEXT: [[TMP49:%.*]] = mul i64 [[N_VEC]], 3
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[X_I64]], [[TMP49]]
-; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; CHECK-NEXT: [[TMP50:%.*]] = mul i32 [[DOTCAST]], 3
-; CHECK-NEXT: [[IND_END22:%.*]] = add i32 [[X_I32]], [[TMP50]]
; CHECK-NEXT: [[TMP53:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X_I64]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP55:%.*]] = mul <vscale x 8 x i64> [[TMP53]], splat (i64 3)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP55]]
-; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP52]]
-; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
-; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[TMP58:%.*]] = mul i64 3, [[TMP28]]
+; CHECK-NEXT: [[DOTSPLATINSERT24:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP58]], i64 0
+; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT24]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], <vscale x 8 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> [[TMP59]], i32 2, <vscale x 8 x i1> splat (i1 true)), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP52]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 [[TMP59]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP27]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]]
+; CHECK-NEXT: [[TMP47:%.*]] = zext i32 [[TMP27]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP47]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP47]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT25]]
-; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP60]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP5]]
+; CHECK-NEXT: br i1 [[TMP48]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL13:%.*]] = phi i32 [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
@@ -107,7 +103,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 3
; CHECK-NEXT: [[TMP64]] = trunc i64 [[IV_NEXT]] to i32
; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[IV]], 99
-; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -144,8 +140,9 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
; CHECK: [[META3]] = !{[[META4:![0-9]+]], [[META5:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
; CHECK: [[META5]] = distinct !{[[META5]], [[META2]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]]}
; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]}
+; CHECK: [[META8]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]]}
;.
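; The new checks above encode the EVL-based tail-folding shape ([[META8]]
; records tailfoldingstyle "evl"). A minimal hand-written sketch of that loop
; structure, assuming %n > 0; the intrinsic is real, everything else here is
; illustrative:
declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)

define void @evl_loop_sketch(i64 %n) {
entry:
  br label %loop
loop:
  ; The IV counts elements already processed; the AVL counts those remaining.
  %evl.iv = phi i64 [ 0, %entry ], [ %evl.iv.next, %loop ]
  %avl    = phi i64 [ %n, %entry ], [ %avl.next, %loop ]
  ; Ask the target how many lanes to process this iteration (VF = vscale x 8).
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 8, i1 true)
  ; ... VP/masked memory operations predicated on %evl would go here ...
  %evl.zext    = zext i32 %evl to i64
  %evl.iv.next = add nuw i64 %evl.iv, %evl.zext
  %avl.next    = sub nuw i64 %avl, %evl.zext
  %done = icmp eq i64 %evl.iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  ret void
}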
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 1addff6..2f92ab5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP
-; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP
+; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -prefer-predicate-over-epilogue=scalar-epilogue -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP
+; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -prefer-predicate-over-epilogue=scalar-epilogue -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP
; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-OUTLOOP %s
; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
index e226eea..ad25a5f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-accesses.ll
@@ -6,41 +6,49 @@
define void @load_store_factor2_i32(ptr %p) {
; CHECK-LABEL: @load_store_factor2_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]])
-; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -53,7 +61,7 @@ define void @load_store_factor2_i32(ptr %p) {
; CHECK-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
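; Worked example of the rounded-up trip count computed in vector.ph above,
; assuming vscale = 2 so VF = vscale x 4 = 8 lanes:
;   N_RND_UP = 1024 + (8 - 1) = 1031
;   N_MOD_VF = 1031 urem 8    = 7
;   N_VEC    = 1031 - 7       = 1024   (1024 is already a multiple of 8)
; With tail folding the vector loop alone covers all 1024 iterations, which is
; why the entry-block guard folds to `br i1 false` and the middle block
; branches straight to the exit.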
@@ -102,41 +110,49 @@ define void @load_store_factor2_i32(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor2_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP12]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP14:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP14]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], splat (i32 1)
; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP11]])
-; SCALABLE-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[TMP13]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i32.p0(<vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -149,7 +165,7 @@ define void @load_store_factor2_i32(ptr %p) {
; SCALABLE-NEXT: store i32 [[Y1]], ptr [[Q1]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
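; Illustration (not from the test) of the mask widening used for a factor-2
; interleave group above: interleave2 of the lane mask with itself duplicates
; each bit across both group members, so e.g. for 4 lanes with 3 active:
;   lane mask:        <1, 1, 1, 0>
;   interleaved mask: <1, 1, 1, 1, 1, 1, 0, 0>
; and both fields of the inactive element are left untouched by the masked
; load and store.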
@@ -180,41 +196,49 @@ exit:
define void @load_store_factor2_i64(ptr %p) {
; CHECK-LABEL: @load_store_factor2_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]])
-; CHECK-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -227,7 +251,7 @@ define void @load_store_factor2_i64(ptr %p) {
; CHECK-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -276,41 +300,49 @@ define void @load_store_factor2_i64(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor2_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP12]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP20:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP20]], splat (i64 1)
; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP10]], <vscale x 2 x i64> [[TMP11]])
-; SCALABLE-NEXT: store <vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[TMP11]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv4i64.p0(<vscale x 4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -323,7 +355,7 @@ define void @load_store_factor2_i64(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y1]], ptr [[Q1]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -354,24 +386,30 @@ exit:
define void @load_store_factor3_i32(ptr %p) {
; CHECK-LABEL: @load_store_factor3_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP15]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2
@@ -379,18 +417,20 @@ define void @load_store_factor3_i32(ptr %p) {
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3)
; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]])
-; CHECK-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -408,7 +448,7 @@ define void @load_store_factor3_i32(ptr %p) {
; CHECK-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -466,24 +506,30 @@ define void @load_store_factor3_i32(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor3_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP15:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP15]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 12 x i32>, ptr [[TMP1]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP14:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP16:%.*]] = mul i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP16]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.masked.load.nxv12i32.p0(ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK]], <vscale x 12 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave3.nxv12i32(<vscale x 12 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 2
@@ -491,18 +537,20 @@ define void @load_store_factor3_i32(ptr %p) {
; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 4 x i32> [[TMP9]], splat (i32 2)
; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP10]], splat (i32 3)
; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> [[TMP11]], <vscale x 4 x i32> [[TMP12]], <vscale x 4 x i32> [[TMP13]])
-; SCALABLE-NEXT: store <vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 12 x i1> @llvm.vector.interleave3.nxv12i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> [[TMP14]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv12i32.p0(<vscale x 12 x i32> [[INTERLEAVED_VEC]], ptr [[TMP17]], i32 4, <vscale x 12 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -520,7 +568,7 @@ define void @load_store_factor3_i32(ptr %p) {
; SCALABLE-NEXT: store i32 [[Y2]], ptr [[Q2]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -557,43 +605,51 @@ exit:
define void @load_store_factor3_i64(ptr %p) {
; CHECK-LABEL: @load_store_factor3_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP15:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP15]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1)
; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
-; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -611,7 +667,7 @@ define void @load_store_factor3_i64(ptr %p) {
; CHECK-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -669,43 +725,51 @@ define void @load_store_factor3_i64(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor3_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP7]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP15:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP15]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave3.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP23:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 2 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 2 x i64> [[TMP23]], splat (i64 1)
; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 3)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave3.nxv6i64(<vscale x 2 x i64> [[TMP25]], <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[TMP13]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave3.nxv6i1(<vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]], <vscale x 2 x i1> [[TMP11]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP14]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
+; SCALABLE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -723,7 +787,7 @@ define void @load_store_factor3_i64(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y2]], ptr [[Q2]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -760,45 +824,53 @@ exit:
define void @load_store_factor4(ptr %p) {
; CHECK-LABEL: @load_store_factor4(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP18:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP18]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; CHECK-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -821,7 +893,7 @@ define void @load_store_factor4(ptr %p) {
; CHECK-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -886,45 +958,53 @@ define void @load_store_factor4(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor4(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP18:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP18]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 4
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 [[INDEX]], 4
+; SCALABLE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP8]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave4.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP24:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 2
; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 2 x i64> [[TMP10]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 2 x i64> [[TMP24]], splat (i64 1)
; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 2 x i64> [[TMP11]], splat (i64 2)
; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP12]], splat (i64 3)
; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i64> [[TMP13]], splat (i64 4)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC4:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP14]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
-; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC4]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave4.nxv8i64(<vscale x 2 x i64> [[TMP26]], <vscale x 2 x i64> [[TMP15]], <vscale x 2 x i64> [[TMP16]], <vscale x 2 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP9]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP22:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; SCALABLE-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 4
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -947,7 +1027,7 @@ define void @load_store_factor4(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y3]], ptr [[Q3]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -990,44 +1070,53 @@ exit:
define void @load_store_factor5(ptr %p) {
; CHECK-LABEL: @load_store_factor5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; CHECK-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1055,7 +1144,7 @@ define void @load_store_factor5(ptr %p) {
; CHECK-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1129,44 +1218,53 @@ define void @load_store_factor5(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor5(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 5
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 5 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]])
-; SCALABLE-NEXT: store <vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 5
+; SCALABLE-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.masked.load.nxv5i64.p0(ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK]], <vscale x 5 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave5.nxv5i64(<vscale x 5 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 5 x i64> @llvm.vector.interleave5.nxv5i64(<vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 5 x i1> @llvm.vector.interleave5.nxv5i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv5i64.p0(<vscale x 5 x i64> [[INTERLEAVED_VEC]], ptr [[TMP19]], i32 8, <vscale x 5 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP25:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP25]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
+; SCALABLE-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 5
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1194,7 +1292,7 @@ define void @load_store_factor5(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y4]], ptr [[Q4]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1243,46 +1341,55 @@ exit:
define void @load_store_factor6(ptr %p) {
; CHECK-LABEL: @load_store_factor6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; CHECK-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1315,7 +1422,7 @@ define void @load_store_factor6(ptr %p) {
; CHECK-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1397,46 +1504,55 @@ define void @load_store_factor6(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor6(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 6
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 6 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]])
-; SCALABLE-NEXT: store <vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP20:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP20]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 6
+; SCALABLE-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.masked.load.nxv6i64.p0(ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK]], <vscale x 6 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave6.nxv6i64(<vscale x 6 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 6 x i64> @llvm.vector.interleave6.nxv6i64(<vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 6 x i1> @llvm.vector.interleave6.nxv6i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv6i64.p0(<vscale x 6 x i64> [[INTERLEAVED_VEC]], ptr [[TMP21]], i32 8, <vscale x 6 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP28:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP28]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
+; SCALABLE-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 6
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1469,7 +1585,7 @@ define void @load_store_factor6(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y5]], ptr [[Q5]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1524,48 +1640,57 @@ exit:
define void @load_store_factor7(ptr %p) {
; CHECK-LABEL: @load_store_factor7(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; CHECK-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; CHECK-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; CHECK-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]]
+; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1603,7 +1728,7 @@ define void @load_store_factor7(ptr %p) {
; CHECK-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -1694,48 +1819,57 @@ define void @load_store_factor7(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor7(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP3]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP3:%.*]] = sub i64 [[TMP4]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP3]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP0:%.*]] = mul i64 [[INDEX]], 7
-; SCALABLE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP0]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 7 x i64>, ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; SCALABLE-NEXT: [[TMP12:%.*]] = add <vscale x 1 x i64> [[TMP5]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP13:%.*]] = add <vscale x 1 x i64> [[TMP6]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP14:%.*]] = add <vscale x 1 x i64> [[TMP7]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 6)
-; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 7)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]])
-; SCALABLE-NEXT: store <vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP22:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP22]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = mul i64 [[INDEX]], 7
+; SCALABLE-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.masked.load.nxv7i64.p0(ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK]], <vscale x 7 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave7.nxv7i64(<vscale x 7 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; SCALABLE-NEXT: [[TMP15:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 7 x i64> @llvm.vector.interleave7.nxv7i64(<vscale x 1 x i64> [[TMP15]], <vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 7 x i1> @llvm.vector.interleave7.nxv7i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv7i64.p0(<vscale x 7 x i64> [[INTERLEAVED_VEC]], ptr [[TMP23]], i32 8, <vscale x 7 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP31:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP31]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP31]]
+; SCALABLE-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = mul i64 [[I]], 7
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1773,7 +1907,7 @@ define void @load_store_factor7(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y6]], ptr [[Q6]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -1834,50 +1968,59 @@ exit:
define void @load_store_factor8(ptr %p) {
; CHECK-LABEL: @load_store_factor8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP36:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP36]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; CHECK-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; CHECK-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
-; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1)
-; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2)
-; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3)
-; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4)
-; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5)
-; CHECK-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6)
-; CHECK-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7)
-; CHECK-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8)
-; CHECK-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]])
-; CHECK-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
+; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; CHECK-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; CHECK-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8)
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]])
+; CHECK-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; CHECK-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]]
+; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -1920,7 +2063,7 @@ define void @load_store_factor8(ptr %p) {
; CHECK-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2017,50 +2160,59 @@ define void @load_store_factor8(ptr %p) {
;
; SCALABLE-LABEL: @load_store_factor8(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP0]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP1]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP36:%.*]] = sub i64 [[TMP1]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP36]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP3:%.*]] = shl i64 [[INDEX]], 3
-; SCALABLE-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP3]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_VEC]])
-; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
-; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
-; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
-; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
-; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
-; SCALABLE-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
-; SCALABLE-NEXT: [[TMP17:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
-; SCALABLE-NEXT: [[TMP18:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
-; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 2)
-; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 3)
-; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 4)
-; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 5)
-; SCALABLE-NEXT: [[TMP24:%.*]] = add <vscale x 1 x i64> [[TMP16]], splat (i64 6)
-; SCALABLE-NEXT: [[TMP25:%.*]] = add <vscale x 1 x i64> [[TMP17]], splat (i64 7)
-; SCALABLE-NEXT: [[TMP26:%.*]] = add <vscale x 1 x i64> [[TMP18]], splat (i64 8)
-; SCALABLE-NEXT: [[INTERLEAVED_VEC12:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]], <vscale x 1 x i64> [[TMP24]], <vscale x 1 x i64> [[TMP25]], <vscale x 1 x i64> [[TMP26]])
-; SCALABLE-NEXT: store <vscale x 8 x i64> [[INTERLEAVED_VEC12]], ptr [[TMP4]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; SCALABLE-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 1, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 1 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 1 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP4:%.*]] = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; SCALABLE-NEXT: [[TMP5:%.*]] = icmp ult <vscale x 1 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 3
+; SCALABLE-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64.p0(ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.vector.deinterleave8.nxv8i64(<vscale x 8 x i64> [[WIDE_MASKED_VEC]])
+; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 0
+; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 1
+; SCALABLE-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 2
+; SCALABLE-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 3
+; SCALABLE-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 4
+; SCALABLE-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 5
+; SCALABLE-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 6
+; SCALABLE-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[STRIDED_VEC]], 7
+; SCALABLE-NEXT: [[TMP16:%.*]] = add <vscale x 1 x i64> [[TMP8]], splat (i64 1)
+; SCALABLE-NEXT: [[TMP17:%.*]] = add <vscale x 1 x i64> [[TMP9]], splat (i64 2)
+; SCALABLE-NEXT: [[TMP18:%.*]] = add <vscale x 1 x i64> [[TMP10]], splat (i64 3)
+; SCALABLE-NEXT: [[TMP19:%.*]] = add <vscale x 1 x i64> [[TMP11]], splat (i64 4)
+; SCALABLE-NEXT: [[TMP20:%.*]] = add <vscale x 1 x i64> [[TMP12]], splat (i64 5)
+; SCALABLE-NEXT: [[TMP21:%.*]] = add <vscale x 1 x i64> [[TMP13]], splat (i64 6)
+; SCALABLE-NEXT: [[TMP22:%.*]] = add <vscale x 1 x i64> [[TMP14]], splat (i64 7)
+; SCALABLE-NEXT: [[TMP23:%.*]] = add <vscale x 1 x i64> [[TMP15]], splat (i64 8)
+; SCALABLE-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i64> @llvm.vector.interleave8.nxv8i64(<vscale x 1 x i64> [[TMP16]], <vscale x 1 x i64> [[TMP17]], <vscale x 1 x i64> [[TMP18]], <vscale x 1 x i64> [[TMP19]], <vscale x 1 x i64> [[TMP20]], <vscale x 1 x i64> [[TMP21]], <vscale x 1 x i64> [[TMP22]], <vscale x 1 x i64> [[TMP23]])
+; SCALABLE-NEXT: [[INTERLEAVED_MASK1:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave8.nxv8i1(<vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]], <vscale x 1 x i1> [[TMP5]])
+; SCALABLE-NEXT: call void @llvm.masked.store.nxv8i64.p0(<vscale x 8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP24]], i32 8, <vscale x 8 x i1> [[INTERLEAVED_MASK1]])
+; SCALABLE-NEXT: [[TMP34:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP34]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP34]]
+; SCALABLE-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 3
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2103,7 +2255,7 @@ define void @load_store_factor8(ptr %p) {
; SCALABLE-NEXT: store i64 [[Y7]], ptr [[Q7]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP20:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2170,40 +2322,47 @@ exit:
define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @combine_load_factor2_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP14:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP14]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -2215,7 +2374,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: store i32 [[RES]], ptr [[DST]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2226,24 +2385,15 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 8
-; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP1]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP2]]
-; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP3]], align 4
-; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
+; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP2]]
; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4
; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC2]], <16 x i32> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
-; FIXED-NEXT: [[TMP5:%.*]] = add <8 x i32> [[STRIDED_VEC]], [[STRIDED_VEC1]]
; FIXED-NEXT: [[TMP6:%.*]] = add <8 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC4]]
; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP7]], i32 8
-; FIXED-NEXT: store <8 x i32> [[TMP5]], ptr [[TMP7]], align 4
-; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP9]], align 4
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; FIXED-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP7]], align 4
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; FIXED: middle.block:
@@ -2270,40 +2420,47 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
;
; SCALABLE-LABEL: @combine_load_factor2_i32(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP14:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP14]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 4
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 4 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP13]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> [[TMP12]], <vscale x 4 x i1> [[TMP12]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(ptr [[TMP15]], i32 4, <vscale x 8 x i1> [[INTERLEAVED_MASK]], <vscale x 8 x i32> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP8]], [[TMP9]]
; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[Q:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP11]], align 4
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -2315,7 +2472,7 @@ define void @combine_load_factor2_i32(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: store i32 [[RES]], ptr [[DST]], align 4
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
@@ -2347,40 +2504,47 @@ exit:
define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; CHECK-LABEL: @combine_load_factor2_i64(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP14:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP14]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]]
+; CHECK-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]])
+; CHECK-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
; CHECK-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; CHECK-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2392,7 +2556,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: store i64 [[RES]], ptr [[DST]], align 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -2403,24 +2567,15 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; FIXED-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 4
-; FIXED-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX]], 1
-; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0]], 1
-; FIXED-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP1]]
-; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]]
-; FIXED-NEXT: [[WIDE_VEC:%.*]] = load <8 x i64>, ptr [[TMP3]], align 8
-; FIXED-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; FIXED-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i64> [[WIDE_VEC]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; FIXED-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX]], 1
+; FIXED-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP2]]
; FIXED-NEXT: [[WIDE_VEC2:%.*]] = load <8 x i64>, ptr [[TMP4]], align 8
; FIXED-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <8 x i64> [[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; FIXED-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <8 x i64> [[WIDE_VEC2]], <8 x i64> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; FIXED-NEXT: [[TMP5:%.*]] = add <4 x i64> [[STRIDED_VEC]], [[STRIDED_VEC1]]
; FIXED-NEXT: [[TMP6:%.*]] = add <4 x i64> [[STRIDED_VEC3]], [[STRIDED_VEC4]]
; FIXED-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; FIXED-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP7]], i32 4
-; FIXED-NEXT: store <4 x i64> [[TMP5]], ptr [[TMP7]], align 8
-; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP9]], align 8
-; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; FIXED-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP7]], align 8
+; FIXED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; FIXED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; FIXED: middle.block:
@@ -2447,40 +2602,47 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
;
; SCALABLE-LABEL: @combine_load_factor2_i64(
; SCALABLE-NEXT: entry:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SCALABLE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; SCALABLE: vector.ph:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP14:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP14]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
; SCALABLE: vector.body:
; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = shl i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP6]]
-; SCALABLE-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 4 x i64>, ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_VEC]])
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP7]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp ult <vscale x 2 x i32> [[TMP6]], [[BROADCAST_SPLAT]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = shl i64 [[INDEX]], 1
+; SCALABLE-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[TMP13]]
+; SCALABLE-NEXT: [[INTERLEAVED_MASK:%.*]] = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> [[TMP12]], <vscale x 2 x i1> [[TMP12]])
+; SCALABLE-NEXT: [[WIDE_MASKED_VEC:%.*]] = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr [[TMP15]], i32 8, <vscale x 4 x i1> [[INTERLEAVED_MASK]], <vscale x 4 x i64> poison)
+; SCALABLE-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> [[WIDE_MASKED_VEC]])
; SCALABLE-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 0
; SCALABLE-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[STRIDED_VEC]], 1
; SCALABLE-NEXT: [[TMP10:%.*]] = add <vscale x 2 x i64> [[TMP8]], [[TMP9]]
; SCALABLE-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[Q:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP10]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP16:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; SCALABLE-NEXT: br label [[EXIT:%.*]]
; SCALABLE: scalar.ph:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; SCALABLE-NEXT: br label [[LOOP:%.*]]
; SCALABLE: loop:
-; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; SCALABLE-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; SCALABLE-NEXT: [[OFFSET0:%.*]] = shl i64 [[I]], 1
; SCALABLE-NEXT: [[Q0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET0]]
; SCALABLE-NEXT: [[X0:%.*]] = load i64, ptr [[Q0]], align 8
@@ -2492,7 +2654,7 @@ define void @combine_load_factor2_i64(ptr noalias %p, ptr noalias %q) {
; SCALABLE-NEXT: store i64 [[RES]], ptr [[DST]], align 8
; SCALABLE-NEXT: [[NEXTI]] = add i64 [[I]], 1
; SCALABLE-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP24:![0-9]+]]
; SCALABLE: exit:
; SCALABLE-NEXT: ret void
;
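The SCALABLE hunk above shows the shape of the rewrite for an interleaved (factor-2) access under EVL tail folding: the element count returned by llvm.experimental.get.vector.length is broadcast and compared against a step vector to form a lane mask, the mask is doubled up with llvm.vector.interleave2 so it covers both fields of each pair, the plain wide load becomes a masked load, and llvm.vector.deinterleave2 splits the result back into the two strided values. The following is a minimal self-contained sketch of that same pattern, not taken from the patch: the function name is hypothetical and the 1024 trip count is hard-coded to mirror the test.

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
declare <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i64>)
declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64>)
declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)

; Sketch only: sum interleaved {a,b} i64 pairs from %p into %q, VF = vscale x 2.
define void @sketch_deinterleave2(ptr noalias %p, ptr noalias %q) {
entry:
  br label %body

body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %body ]
  %avl = phi i64 [ 1024, %entry ], [ %avl.next, %body ]
  ; How many elements this step may process (<= AVL, scalable VF of 2).
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
  ; Lane i is active iff i < EVL.
  %evl.ins = insertelement <vscale x 2 x i32> poison, i32 %evl, i64 0
  %evl.splat = shufflevector <vscale x 2 x i32> %evl.ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %step = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
  %mask = icmp ult <vscale x 2 x i32> %step, %evl.splat
  ; Widen the mask to cover both members of each pair, then load and split.
  %imask = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> %mask)
  %off = shl i64 %index, 1
  %addr = getelementptr i64, ptr %p, i64 %off
  %wide = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64.p0(ptr %addr, i32 8, <vscale x 4 x i1> %imask, <vscale x 4 x i64> poison)
  %pair = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.vector.deinterleave2.nxv4i64(<vscale x 4 x i64> %wide)
  %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %pair, 0
  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %pair, 1
  %sum = add <vscale x 2 x i64> %a, %b
  %dst = getelementptr i64, ptr %q, i64 %index
  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %sum, ptr align 8 %dst, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ; Advance by the number of elements actually processed this step.
  %evl.zext = zext i32 %evl to i64
  %index.next = add nuw i64 %index, %evl.zext
  %avl.next = sub nuw i64 %avl, %evl.zext
  %done = icmp eq i64 %index.next, 1024
  br i1 %done, label %exit, label %body

exit:
  ret void
}

Note the load side is masked (via the interleaved mask) while the store side stays all-true and is bounded by EVL alone; that matches the hunk, where only the interleaved access needs per-lane masking.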
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
index 056dc7e..ee91f75 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
@@ -7,22 +7,31 @@
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF6
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF7
; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf8-segment-load-store -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=OPT-NF8
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,-optimized-nf2-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-NO-OPT
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF2
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf3-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF3
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf4-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF4
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf5-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF5
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf6-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF6
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf7-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF7
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v,+optimized-nf8-segment-load-store -scalable-vectorization=off -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s --check-prefix=FIXED-OPT-NF8
%i8.2 = type {i8, i8}
define void @i8_factor_2(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF2-LABEL: Checking a loop in 'i8_factor_2'
+; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2-LABEL: Checking a loop in 'i8_factor_2'
-; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 3 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 4 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; OPT-NF2: Cost of 8 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 3 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
@@ -33,17 +42,18 @@ entry:
; OPT-NF2: Cost of 4 for VF vscale x 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; OPT-NF2: Cost of 8 for VF vscale x 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_2'
+; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_2'
-; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 8: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 16: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 32: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
; NO-OPT: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 2 at <badref>, ir<%p0>
; NO-OPT: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 2 at %l0, ir<%p0>
@@ -76,17 +86,18 @@ for.end:
define void @i8_factor_3(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF3-LABEL: Checking a loop in 'i8_factor_3'
+; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3-LABEL: Checking a loop in 'i8_factor_3'
-; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 4 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 5 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
@@ -97,17 +108,18 @@ entry:
; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_3'
+; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_3'
-; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 12 for VF 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 24 for VF 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
-; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
-; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
@@ -144,17 +156,18 @@ for.end:
define void @i8_factor_4(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF4-LABEL: Checking a loop in 'i8_factor_4'
+; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4-LABEL: Checking a loop in 'i8_factor_4'
-; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 5 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 6 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
@@ -165,17 +178,18 @@ entry:
; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_4'
+; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_4'
-; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
-; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
-; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
@@ -216,15 +230,16 @@ for.end:
define void @i8_factor_5(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF5-LABEL: Checking a loop in 'i8_factor_5'
+; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5-LABEL: Checking a loop in 'i8_factor_5'
-; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
@@ -233,15 +248,16 @@ entry:
; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_5'
+; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_5'
-; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 20 for VF 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
-; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
-; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
@@ -284,15 +300,16 @@ for.end:
define void @i8_factor_6(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF6-LABEL: Checking a loop in 'i8_factor_6'
+; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6-LABEL: Checking a loop in 'i8_factor_6'
-; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 7 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 8 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
@@ -301,15 +318,16 @@ entry:
; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_6'
+; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_6'
-; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 24 for VF 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
-; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
-; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
@@ -356,15 +374,16 @@ for.end:
define void @i8_factor_7(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF7-LABEL: Checking a loop in 'i8_factor_7'
+; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7-LABEL: Checking a loop in 'i8_factor_7'
-; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 9 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
@@ -373,15 +392,16 @@ entry:
; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_7'
+; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_7'
-; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 28 for VF 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
-; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
-; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
@@ -432,15 +452,16 @@ for.end:
define void @i8_factor_8(ptr %data, i64 %n) {
entry:
br label %for.body
+; FIXED-OPT-NF8-LABEL: Checking a loop in 'i8_factor_8'
+; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8-LABEL: Checking a loop in 'i8_factor_8'
-; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 9 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 10 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
@@ -449,15 +470,16 @@ entry:
; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT-LABEL: Checking a loop in 'i8_factor_8'
+; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; FIXED-NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_8'
-; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 32 for VF 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
-; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
-; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
index 93e0f90..946cb95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/lmul.ll
@@ -46,124 +46,130 @@ define void @load_store(ptr %p) {
;
; LMUL2-LABEL: @load_store(
; LMUL2-NEXT: entry:
-; LMUL2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL2-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; LMUL2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL2-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL2: vector.ph:
; LMUL2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; LMUL2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL2-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; LMUL2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; LMUL2-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; LMUL2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; LMUL2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; LMUL2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
; LMUL2-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL2: vector.body:
; LMUL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL2-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL2-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; LMUL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP5]], align 8
+; LMUL2-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL2-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL2-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL2-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL2-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL2-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL2-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL2-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL2: middle.block:
-; LMUL2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL2-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL2-NEXT: br label [[FOR_END:%.*]]
; LMUL2: scalar.ph:
-; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL2-NEXT: br label [[FOR_BODY:%.*]]
; LMUL2: for.body:
-; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL2-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL2-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL2-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL2-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL2-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL2-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL2: for.end:
; LMUL2-NEXT: ret void
;
; LMUL4-LABEL: @load_store(
; LMUL4-NEXT: entry:
-; LMUL4-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL4-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; LMUL4-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL4-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL4-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL4: vector.ph:
; LMUL4-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; LMUL4-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL4-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; LMUL4-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; LMUL4-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; LMUL4-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; LMUL4-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; LMUL4-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL4-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
; LMUL4-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL4: vector.body:
; LMUL4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL4-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL4-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; LMUL4-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i64>, ptr [[TMP5]], align 8
+; LMUL4-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL4-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL4-NEXT: store <vscale x 4 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL4-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL4-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL4-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL4-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL4-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL4-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL4: middle.block:
-; LMUL4-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL4-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL4-NEXT: br label [[FOR_END:%.*]]
; LMUL4: scalar.ph:
-; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL4-NEXT: br label [[FOR_BODY:%.*]]
; LMUL4: for.body:
-; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL4-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL4-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL4-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL4-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL4-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL4-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL4-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL4: for.end:
; LMUL4-NEXT: ret void
;
; LMUL8-LABEL: @load_store(
; LMUL8-NEXT: entry:
-; LMUL8-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; LMUL8-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; LMUL8-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; LMUL8-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; LMUL8-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; LMUL8: vector.ph:
; LMUL8-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; LMUL8-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; LMUL8-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; LMUL8-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; LMUL8-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; LMUL8-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; LMUL8-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; LMUL8-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; LMUL8-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 8
; LMUL8-NEXT: br label [[VECTOR_BODY:%.*]]
; LMUL8: vector.body:
; LMUL8-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL8-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; LMUL8-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; LMUL8-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 [[INDEX]]
-; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP5]], align 8
+; LMUL8-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; LMUL8-NEXT: [[TMP7:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], splat (i64 1)
-; LMUL8-NEXT: store <vscale x 8 x i64> [[TMP7]], ptr [[TMP5]], align 8
-; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
-; LMUL8-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; LMUL8-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; LMUL8-NEXT: call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> [[TMP7]], ptr align 8 [[TMP5]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
+; LMUL8-NEXT: [[TMP10:%.*]] = zext i32 [[TMP6]] to i64
+; LMUL8-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; LMUL8-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; LMUL8-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; LMUL8-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; LMUL8: middle.block:
-; LMUL8-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; LMUL8-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; LMUL8-NEXT: br label [[FOR_END:%.*]]
; LMUL8: scalar.ph:
-; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; LMUL8-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; LMUL8-NEXT: br label [[FOR_BODY:%.*]]
; LMUL8: for.body:
-; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; LMUL8-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; LMUL8-NEXT: [[Q:%.*]] = getelementptr inbounds i64, ptr [[P]], i64 [[IV]]
; LMUL8-NEXT: [[V:%.*]] = load i64, ptr [[Q]], align 8
; LMUL8-NEXT: [[W:%.*]] = add i64 [[V]], 1
; LMUL8-NEXT: store i64 [[W]], ptr [[Q]], align 8
; LMUL8-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; LMUL8-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; LMUL8-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; LMUL8: for.end:
; LMUL8-NEXT: ret void
;
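Across all three LMUL configurations the change in lmul.ll is the same: the minimum-iteration guard in the entry block folds to br i1 false, the strip-mined loop gains an AVL phi whose remaining count feeds llvm.experimental.get.vector.length, and the plain wide load/store become length-aware llvm.vp.load/llvm.vp.store. Stripped of the FileCheck capture syntax, a hedged standalone sketch of the LMUL2-shaped loop (hypothetical function name, 1024 trip count as in the test) is:

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr, <vscale x 2 x i1>, i32)
declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)

; Sketch only: increment 1024 i64s in place, EVL-folded, VF = vscale x 2 (LMUL2).
define void @sketch_load_store(ptr %p) {
entry:
  br label %body

body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %body ]
  %avl = phi i64 [ 1024, %entry ], [ %avl.next, %body ]
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
  %addr = getelementptr inbounds i64, ptr %p, i64 %index
  %v = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 %addr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %w = add <vscale x 2 x i64> %v, splat (i64 1)
  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %w, ptr align 8 %addr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ; AVL shrinks by EVL each step; the tail is absorbed by a shorter final step,
  ; which is why the scalar epilogue becomes dead and the guard folds to false.
  %evl.zext = zext i32 %evl to i64
  %index.next = add nuw i64 %index, %evl.zext
  %avl.next = sub nuw i64 %avl, %evl.zext
  %done = icmp eq i64 %index.next, 1024
  br i1 %done, label %exit, label %body

exit:
  ret void
}

The LMUL4 and LMUL8 hunks differ only in the VF argument to get.vector.length (4 or 8) and the corresponding nxv4i64/nxv8i64 intrinsic variants.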
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 32cb426..aad789c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -185,13 +185,22 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP9]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 16, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 8
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1:%.*]], align 1
-; CHECK-NEXT: [[TMP2:%.*]] = shl <16 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4:%.*]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i8> [[TMP2]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <16 x i8> [[TMP5]], ptr [[TMP4]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 16, i32 8, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 8 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 8 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -199,7 +208,7 @@ define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
@@ -239,13 +248,22 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP0]], 16
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP9]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 32, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP1:%.*]], align 1
-; CHECK-NEXT: [[TMP2:%.*]] = shl <32 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP4:%.*]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = add <32 x i8> [[TMP2]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <32 x i8> [[TMP5]], ptr [[TMP4]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 32, i32 16, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP1:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP4:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP11:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP11]], ptr align 1 [[TMP4]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
@@ -253,7 +271,7 @@ define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
@@ -292,26 +310,30 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 16
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 24, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = shl <8 x i8> [[WIDE_LOAD]], splat (i8 1)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP4]], align 1
-; CHECK-NEXT: [[TMP6:%.*]] = add <8 x i8> [[TMP3]], [[WIDE_LOAD1]]
-; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[TMP4]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
-; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 24, i32 16, i1 true)
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[SRC:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = shl <vscale x 16 x i8> [[VP_OP_LOAD]], splat (i8 1)
+; CHECK-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[DST:%.*]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 16 x i8> [[TMP6]], [[VP_OP_LOAD1]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP7]], ptr align 1 [[DST]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP5]])
+; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1
@@ -321,7 +343,7 @@ define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -360,35 +382,31 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = icmp ule <16 x i64> [[VEC_IV]], splat (i64 9)
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i8> [ <i8 2, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
-; CHECK-NEXT: [[TMP2]] = mul <16 x i8> [[WIDE_MASKED_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
+; CHECK-NEXT: [[TMP1]] = mul <8 x i8> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> [[TMP3]])
-; CHECK-NEXT: br label [[FOR_END:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> [[TMP1]])
+; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 8, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ 2, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]]
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 10
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i8 [ [[MUL]], [[FOR_BODY]] ]
; CHECK-NEXT: ret i8 [[MUL_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
index 10ba208..e5e694c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
@@ -11,15 +11,14 @@ target triple = "riscv64"
define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-LABEL: @test(
; VLENUNK-NEXT: entry:
-; VLENUNK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; VLENUNK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; VLENUNK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; VLENUNK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; VLENUNK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; VLENUNK: vector.ph:
; VLENUNK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; VLENUNK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; VLENUNK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; VLENUNK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; VLENUNK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; VLENUNK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP12]]
+; VLENUNK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; VLENUNK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; VLENUNK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; VLENUNK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
@@ -27,32 +26,41 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; VLENUNK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
; VLENUNK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
-; VLENUNK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP5]]
-; VLENUNK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
-; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; VLENUNK-NEXT: br label [[VECTOR_BODY:%.*]]
; VLENUNK: vector.body:
; VLENUNK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VLENUNK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VLENUNK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VLENUNK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
+; VLENUNK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
+; VLENUNK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP15]]
+; VLENUNK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; VLENUNK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; VLENUNK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP10]], [[BROADCAST_SPLAT4]]
; VLENUNK-NEXT: [[TMP13:%.*]] = icmp ult <vscale x 4 x i64> [[VEC_IND]], splat (i64 512)
+; VLENUNK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer
; VLENUNK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
-; VLENUNK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> poison)
-; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
+; VLENUNK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> [[TMP13]], i32 [[TMP7]])
+; VLENUNK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i32> zeroinitializer
; VLENUNK-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[PREDPHI]], [[BROADCAST_SPLAT]]
; VLENUNK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
-; VLENUNK-NEXT: store <vscale x 4 x i32> [[TMP17]], ptr [[TMP18]], align 4
-; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; VLENUNK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; VLENUNK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64
+; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]]
+; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; VLENUNK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VLENUNK: middle.block:
-; VLENUNK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; VLENUNK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; VLENUNK-NEXT: br label [[FOR_END:%.*]]
; VLENUNK: scalar.ph:
-; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; VLENUNK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VLENUNK-NEXT: br label [[FOR_BODY:%.*]]
; VLENUNK: for.body:
-; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
+; VLENUNK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
; VLENUNK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[IV]], 512
; VLENUNK-NEXT: br i1 [[ICMP]], label [[DO_LOAD:%.*]], label [[LATCH]]
; VLENUNK: do_load:
@@ -66,7 +74,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
; VLENUNK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
; VLENUNK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; VLENUNK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VLENUNK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; VLENUNK: for.end:
; VLENUNK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index 6800a93..b28d801 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -16,11 +16,7 @@
define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture readonly %trigger) local_unnamed_addr #0 {
; RV32-LABEL: @foo4(
; RV32-NEXT: entry:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; RV32-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; RV32: vector.memcheck:
; RV32-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 79880
; RV32-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i32 39940
@@ -36,40 +32,45 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32: vector.ph:
; RV32-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP2:%.*]] = sub i64 [[TMP4]], 1
+; RV32-NEXT: [[N_RND_UP:%.*]] = add i64 625, [[TMP2]]
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; RV32-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
; RV32-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV32-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV32-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]
-; RV32-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]]
-; RV32-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0
-; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: br label [[VECTOR_BODY:%.*]]
; RV32: vector.body:
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV32-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV32-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; RV32-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]]
+; RV32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
+; RV32-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV32-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
; RV32-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]]
+; RV32-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV32-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV32-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
-; RV32-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
+; RV32-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; RV32-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; RV32-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625
+; RV32-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV32: middle.block:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; RV32-NEXT: br label [[FOR_END:%.*]]
; RV32: scalar.ph:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label [[FOR_BODY:%.*]]
; RV32: for.body:
; RV32-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
@@ -89,17 +90,13 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV32: for.inc:
; RV32-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; RV32-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
-; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]]
+; RV32-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]]
; RV32: for.end:
; RV32-NEXT: ret void
;
; RV64-LABEL: @foo4(
; RV64-NEXT: entry:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; RV64-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; RV64: vector.memcheck:
; RV64-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
; RV64-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
@@ -115,40 +112,45 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64: vector.ph:
; RV64-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 625, [[TMP4]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 625, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP2:%.*]] = sub i64 [[TMP4]], 1
+; RV64-NEXT: [[N_RND_UP:%.*]] = add i64 625, [[TMP2]]
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
-; RV64-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
; RV64-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; RV64-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 16)
; RV64-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]
-; RV64-NEXT: [[TMP12:%.*]] = mul i64 16, [[TMP6]]
-; RV64-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP12]], i64 0
-; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: br label [[VECTOR_BODY:%.*]]
; RV64: vector.body:
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; RV64-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 625, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; RV64-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
+; RV64-NEXT: [[TMP11:%.*]] = mul i64 16, [[TMP8]]
+; RV64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
+; RV64-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TRIGGER]], <vscale x 2 x i64> [[VEC_IND]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> [[TMP13]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i32> poison), !alias.scope [[META0:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i32> @llvm.vp.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> align 4 [[TMP13]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]]
; RV64-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 2 x i32> [[WIDE_MASKED_GATHER]], splat (i32 100)
; RV64-NEXT: [[TMP15:%.*]] = shl nuw nsw <vscale x 2 x i64> [[VEC_IND]], splat (i64 1)
; RV64-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[B]], <vscale x 2 x i64> [[TMP15]]
-; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP16]], i32 8, <vscale x 2 x i1> [[TMP14]], <vscale x 2 x double> poison), !alias.scope [[META3:![0-9]+]]
+; RV64-NEXT: [[WIDE_MASKED_GATHER6:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP16]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]]
; RV64-NEXT: [[TMP17:%.*]] = sitofp <vscale x 2 x i32> [[WIDE_MASKED_GATHER]] to <vscale x 2 x double>
; RV64-NEXT: [[TMP18:%.*]] = fadd <vscale x 2 x double> [[WIDE_MASKED_GATHER6]], [[TMP17]]
; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], <vscale x 2 x i64> [[VEC_IND]]
-; RV64-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> [[TMP19]], i32 8, <vscale x 2 x i1> [[TMP14]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[TMP18]], <vscale x 2 x ptr> align 8 [[TMP19]], <vscale x 2 x i1> [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP20]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; RV64-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 625
+; RV64-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV64: middle.block:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 625, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; RV64-NEXT: br label [[FOR_END:%.*]]
; RV64: scalar.ph:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label [[FOR_BODY:%.*]]
; RV64: for.body:
; RV64-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
@@ -168,7 +170,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
; RV64: for.inc:
; RV64-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 16
; RV64-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
-; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP11:![0-9]+]]
+; RV64-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP12:![0-9]+]]
; RV64: for.end:
; RV64-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
index ee6b950..10af538 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 4
-; RUN: opt -passes=loop-vectorize -mattr=+v -S < %s | FileCheck %s --check-prefixes=CHECK,V
-; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ
-; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V
-; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ
+; RUN: opt -passes=loop-vectorize -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=CHECK,ZVQDOTQ
+; RUN: opt -passes=loop-vectorize -mattr=+v -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-V
+; RUN: opt -passes=loop-vectorize -mattr=+v,+experimental-zvqdotq -scalable-vectorization=off -prefer-predicate-over-epilogue=scalar-epilogue -S < %s | FileCheck %s --check-prefixes=FIXED,FIXED-ZVQDOTQ
+
+; TODO: Remove -prefer-predicate-over-epilogue=scalar-epilogue when partial reductions with EVL tail folding are supported.
target triple = "riscv64-none-unknown-elf"
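
; For context (not part of this patch): once EVL tail folding supports partial
; reductions, these RUN lines would drop the flag and the vector bodies would take
; the vp-intrinsic form seen in the other tests in this diff. A minimal sketch,
; with hypothetical value names (%avl, %evl, %p); the intrinsic signatures match
; the ones emitted by the EVL-folded loops above:
;
;   %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 4, i1 true)
;   %v = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 %p, <vscale x 4 x i1> splat (i1 true), i32 %evl)
;   %evl.zext = zext i32 %evl to i64
;   %avl.next = sub nuw i64 %avl, %evl.zext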
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
index b5b62d0..cad15d1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
@@ -9,15 +9,14 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK-LABEL: define void @pr87378_vpinstruction_or_drop_poison_generating_flags(
; CHECK-SAME: ptr [[ARG:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1001, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1001, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1001, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1001, [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[A]], i64 0
@@ -29,39 +28,49 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 8 x i64> [[TMP6]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP7]]
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP8]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1001, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <vscale x 8 x i32> poison, i32 [[TMP25]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <vscale x 8 x i32> [[BROADCAST_SPLATINSERT7]], <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT5]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 8 x i32> [[TMP10]], [[BROADCAST_SPLAT8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP28:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
-; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP14]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = xor <vscale x 8 x i1> [[TMP13]], splat (i1 true)
-; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP29:%.*]] = select <vscale x 8 x i1> [[TMP11]], <vscale x 8 x i1> [[TMP16]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = or <vscale x 8 x i1> [[TMP15]], [[TMP29]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp ule <vscale x 8 x i64> [[VEC_IND]], [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[TMP19:%.*]] = select <vscale x 8 x i1> [[TMP17]], <vscale x 8 x i1> [[TMP18]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP20:%.*]] = xor <vscale x 8 x i1> [[TMP14]], splat (i1 true)
-; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP13]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = select <vscale x 8 x i1> [[TMP28]], <vscale x 8 x i1> [[TMP20]], <vscale x 8 x i1> zeroinitializer
; CHECK-NEXT: [[TMP22:%.*]] = or <vscale x 8 x i1> [[TMP19]], [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <vscale x 8 x i1> [[TMP21]], i32 0
; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP23]], i64 poison, i64 [[INDEX]]
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[PREDPHI]]
-; CHECK-NEXT: call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr [[TMP24]], i32 2, <vscale x 8 x i1> [[TMP22]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0(<vscale x 8 x i16> zeroinitializer, ptr align 2 [[TMP24]], <vscale x 8 x i1> [[TMP22]], i32 [[TMP25]])
+; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP26]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1001
+; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1001, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[C_1:%.*]] = icmp ule i64 [[IV]], [[A]]
; CHECK-NEXT: br i1 [[C_1]], label [[THEN_1:%.*]], label [[ELSE_1:%.*]]
; CHECK: then.1:
@@ -80,7 +89,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
; CHECK: loop.latch:
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[IV]], 1000
-; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -118,8 +127,9 @@ exit:
ret void
}
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
index 554ce7b..39e865b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -9,46 +9,49 @@ define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @add(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
;
entry:
@@ -74,46 +77,49 @@ define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @or(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[OR_LCSSA]]
;
entry:
@@ -139,46 +145,49 @@ define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @and(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[AND_LCSSA]]
;
entry:
@@ -204,46 +213,49 @@ define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @xor(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
;
entry:
@@ -269,48 +281,51 @@ define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @smin(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -337,48 +352,51 @@ define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-LABEL: define i32 @umax(
; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP8]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -405,46 +423,49 @@ define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
; CHECK-LABEL: define float @fadd_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP7]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[ADD_LCSSA]]
;
entry:
@@ -468,46 +489,49 @@ define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "tar
; CHECK-LABEL: define half @fadd_fast_half_zvfh(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP12]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
-; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP7]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP13]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP9:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP7]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP11]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
;
entry:
@@ -549,7 +573,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]])
@@ -567,7 +591,7 @@ define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "
; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[ADD_LCSSA]]
@@ -611,7 +635,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]])
@@ -629,7 +653,7 @@ define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "targ
; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[ADD_LCSSA]]
@@ -657,48 +681,51 @@ define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmin_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -723,48 +750,51 @@ define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK-LABEL: define half @fmin_fast_half_zvfhmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -789,48 +819,51 @@ define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK-LABEL: define bfloat @fmin_fast_bfloat_zvfbfmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -857,48 +890,51 @@ define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-LABEL: define float @fmax_fast(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -923,48 +959,51 @@ define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #
; CHECK-LABEL: define half @fmax_fast_half_zvfhmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -989,48 +1028,51 @@ define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64
; CHECK-LABEL: define bfloat @fmax_fast_bfloat_zvfbfmin(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x bfloat> @llvm.vp.load.nxv8bf16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x bfloat> @llvm.vp.merge.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> [[TMP8]], <vscale x 8 x bfloat> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ 0xR0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ 0xR0000, %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]]
; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP12]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
;
entry:
@@ -1077,7 +1119,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]])
@@ -1095,7 +1137,7 @@ define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
@@ -1142,7 +1184,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re
; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -1165,7 +1207,7 @@ define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture re
; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
@@ -1197,40 +1239,43 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-LABEL: define float @fmuladd(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP9]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP8]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP16:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1238,9 +1283,9 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[MULADD_LCSSA]]
;
entry:
@@ -1266,40 +1311,43 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh"
; CHECK-LABEL: define half @fmuladd_f16_zvfh(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP9:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP7]], align 4
-; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0(ptr align 4 [[TMP7]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP14]])
+; CHECK-NEXT: [[TMP8:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP9]] = call <vscale x 8 x half> @llvm.vp.merge.nxv8f16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x half> [[TMP8]], <vscale x 8 x half> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[TMP10:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP8]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: [[TMP16:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP9]])
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ 0xH0000, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ 0xH0000, %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
@@ -1307,9 +1355,9 @@ define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh"
; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; CHECK: [[FOR_END]]:
-; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[MULADD_LCSSA]]
;
entry:
@@ -1360,7 +1408,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf
; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
@@ -1380,7 +1428,7 @@ define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvf
; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret half [[MULADD_LCSSA]]
@@ -1430,7 +1478,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin
; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]]
; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
@@ -1450,7 +1498,7 @@ define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin
; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]]
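
The updated CHECK lines above all follow the same EVL (explicit-vector-length) tail-folding shape: each iteration asks the target for a lane count bounded by the remaining trip count (the AVL), performs the memory operations as length-predicated @llvm.vp.* calls, and advances the induction variable by the number of lanes actually processed, so no scalar remainder loop is taken. A minimal hand-written sketch of that shape (function and value names here are illustrative, not taken from the tests):

define void @evl_body_sketch(ptr %a, i64 %n) {
entry:
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %avl = phi i64 [ %n, %entry ], [ %avl.next, %vector.body ]
  ; Ask the target how many lanes to process this iteration (at most AVL).
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 4, i1 true)
  %gep = getelementptr inbounds i32, ptr %a, i64 %index
  ; Only the first EVL lanes are loaded/stored; the remaining lanes are
  ; untouched, which is what makes the epilogue loop unnecessary.
  %v = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %add = add <vscale x 4 x i32> %v, splat (i32 1)
  call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> %add, ptr align 4 %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ; Advance the IV and shrink the AVL by the lanes actually processed.
  %evl.zext = zext i32 %evl to i64
  %index.next = add nuw i64 %index, %evl.zext
  %avl.next = sub nuw i64 %avl, %evl.zext
  %done = icmp eq i64 %index.next, %n
  br i1 %done, label %exit, label %vector.body

exit:
  ret void
}

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr, <vscale x 4 x i1>, i32)
declare void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)

Because get.vector.length never returns more than the AVL, the final iteration simply runs with a shorter EVL instead of branching to an epilogue, which is why the min-iters check and the N_VEC-based resume values disappear from the CHECK lines.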
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
index 5a67b54..346f1cb 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-bf16.ll
@@ -1,5 +1,9 @@
; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s
+
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled
+; ones, and then remove this flag.
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
index d4909fa..b25bc48 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-f16.ll
@@ -1,6 +1,10 @@
; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH
-; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfh -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFH
+; RUN: opt -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -prefer-predicate-over-epilogue=scalar-epilogue -debug-only=loop-vectorize,vplan --disable-output -riscv-v-register-bit-width-lmul=1 -S < %s 2>&1 | FileCheck %s --check-prefix=ZVFHMIN
+
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled
+; ones, and then remove this flag.
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
index 7037282..116ccc9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage.ll
@@ -1,25 +1,29 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -force-vector-width=1 \
+; RUN: -force-vector-width=1 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-SCALAR
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=1 \
+; RUN: -riscv-v-register-bit-width-lmul=1 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL1
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=2 \
+; RUN: -riscv-v-register-bit-width-lmul=2 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL2
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=4 \
+; RUN: -riscv-v-register-bit-width-lmul=4 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL4
; RUN: opt -passes=loop-vectorize -mtriple riscv64-linux-gnu \
; RUN: -mattr=+v,+d -debug-only=loop-vectorize,vplan --disable-output \
-; RUN: -riscv-v-register-bit-width-lmul=8 \
+; RUN: -riscv-v-register-bit-width-lmul=8 -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK-LMUL8
+; TODO: -prefer-predicate-over-epilogue=scalar-epilogue was added to allow
+; unrolling. Calculate register pressure for all VPlans, not just unrolled
+; ones, and then remove this flag.
+
define void @add(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i32 signext %size, ptr noalias nocapture writeonly %result) {
; CHECK-LABEL: add
; CHECK-SCALAR: LV(REG): VF = 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
index 85163c7..a0d29e8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/remark-reductions.ll
@@ -5,41 +5,42 @@ define float @s311(float %a_0, float %s311_sum) {
; CHECK-LABEL: define float @s311(
; CHECK-SAME: float [[A_0:%.*]], float [[S311_SUM:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1200, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = sub i32 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 1200, [[TMP8]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[A_0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi float [ [[S311_SUM]], %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 1200, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP6]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1200
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 1200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ [[S311_SUM]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[S311_SUM]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi float [ [[S311_SUM]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED_NEXT]] = fadd float [[A_0]], [[RED]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[IV_NEXT]], 1200
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi float [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[RED_LCSSA]]
@@ -60,8 +61,9 @@ exit:
ret float %red.lcssa
}
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
;.
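
The metadata change above is the only structural difference at the end of this file: tail-folded loops now carry an extra !llvm.loop entry recording the folding style. Schematically, the vector loop's back-branch and its metadata look like this (a minimal sketch; the metadata numbering is illustrative):

  br i1 %done, label %exit, label %vector.body, !llvm.loop !0

!0 = distinct !{!0, !1, !2, !3}
!1 = !{!"llvm.loop.isvectorized", i32 1}
!2 = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
!3 = !{!"llvm.loop.unroll.runtime.disable"}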
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
index 0b3dcf8..186573f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse.ll
@@ -19,98 +19,100 @@ define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
; RV64-LABEL: define void @vector_reverse_i32(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; RV64-NEXT: [[N_RND_UP:%.*]] = add i64 1023, [[TMP6]]
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
-; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP9]]
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP25]]
+; RV64-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP20]], i64 [[TMP18]]
+; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br [[EXIT:label %.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_BODY]]:
;
; RV32-LABEL: define void @vector_reverse_i32(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; RV32-NEXT: [[N_RND_UP:%.*]] = add i64 1023, [[TMP6]]
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 [[TMP10]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 [[TMP10]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i32 [[TMP20]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP22]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP16]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP18]], i32 [[TMP20]]
+; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
+; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br [[EXIT:label %.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
@@ -206,10 +208,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV64: [[FOR_BODY_PREHEADER]]:
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; RV64: [[VECTOR_SCEVCHECK]]:
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
@@ -233,46 +232,50 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64: [[VECTOR_PH]]:
; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP16]], 1
+; RV64-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP19]]
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP16]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
-; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
-; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP24]]
-; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP27]], i64 [[TMP26]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP28]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP38:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[TMP28]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP38]], i64 [[TMP26]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
; RV64-NEXT: [[TMP29:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP30]], i64 [[TMP31]]
-; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP34]], i64 [[TMP33]]
-; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]])
-; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP35]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
-; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
+; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -289,10 +292,7 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV32: [[FOR_BODY_PREHEADER]]:
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; RV32: [[VECTOR_MEMCHECK]]:
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
@@ -303,48 +303,48 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32: [[VECTOR_PH]]:
; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP8]], 1
+; RV32-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP11]]
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i32 [[TMP17]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP21]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; RV32-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
-; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
-; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
-; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 [[TMP25]]
-; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i32 [[TMP27]]
-; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]])
-; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP23]], i32 [[TMP21]]
+; RV32-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP24]], i32 [[TMP27]]
+; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -487,10 +487,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV64: [[FOR_BODY_PREHEADER]]:
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
; RV64: [[VECTOR_SCEVCHECK]]:
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
@@ -514,46 +511,50 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV64: [[VECTOR_PH]]:
; RV64-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP16]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP16]], 1
+; RV64-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP19]]
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP16]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
-; RV64-NEXT: [[TMP19:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV64-NEXT: [[TMP20:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP24:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
-; RV64-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[TMP24]]
-; RV64-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP27]], i64 [[TMP26]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP28]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP38:%.*]] = getelementptr float, ptr [[TMP23]], i64 [[TMP28]]
+; RV64-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP38]], i64 [[TMP26]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
; RV64-NEXT: [[TMP29:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]]
-; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP18]]
-; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP18]], 1
+; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
+; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
-; RV64-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP30]], i64 [[TMP31]]
-; RV64-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP34]], i64 [[TMP33]]
-; RV64-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]])
-; RV64-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP35]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
-; RV64-NEXT: [[TMP36:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV64-NEXT: [[TMP34:%.*]] = getelementptr float, ptr [[TMP30]], i64 [[TMP31]]
+; RV64-NEXT: [[TMP35:%.*]] = getelementptr float, ptr [[TMP34]], i64 [[TMP33]]
+; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
+; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
+; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP19]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV64-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP20]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -570,10 +571,7 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
; RV32: [[FOR_BODY_PREHEADER]]:
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; RV32-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; RV32: [[VECTOR_MEMCHECK]]:
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
@@ -584,48 +582,48 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; RV32: [[VECTOR_PH]]:
; RV32-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP8]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP8]], 1
+; RV32-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP11]]
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; RV32-NEXT: [[TMP11:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
-; RV32-NEXT: [[TMP12:%.*]] = sub i32 [[N]], [[DOTCAST]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP16:%.*]] = trunc i64 [[TMP10]] to i32
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
-; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP17]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP21]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; RV32-NEXT: [[TMP22:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP14]]
-; RV32-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP10]] to i32
-; RV32-NEXT: [[TMP25:%.*]] = mul i32 0, [[TMP24]]
-; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP24]], 1
+; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
+; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
-; RV32-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP25]]
-; RV32-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i32 [[TMP27]]
-; RV32-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]])
-; RV32-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; RV32-NEXT: [[TMP24:%.*]] = getelementptr float, ptr [[TMP23]], i32 [[TMP21]]
+; RV32-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP24]], i32 [[TMP27]]
+; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
+; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP11]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
-; RV32-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP12]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
+; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
@@ -762,98 +760,100 @@ define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
; RV64-LABEL: define void @vector_reverse_f32_simplify(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV64-NEXT: [[ENTRY:.*]]:
-; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV64-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; RV64-NEXT: [[N_RND_UP:%.*]] = add i64 1023, [[TMP6]]
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
-; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
+; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
-; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]]
-; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]]
-; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
-; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP9]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
-; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
-; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
-; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]]
-; RV64-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[TMP18]]
-; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]])
-; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP20]], align 4
-; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV64-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV64-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV64-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i64 [[TMP25]]
+; RV64-NEXT: [[TMP21:%.*]] = getelementptr float, ptr [[TMP20]], i64 [[TMP18]]
+; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
+; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
+; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
+; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
+; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
-; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV64-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV64-NEXT: br [[EXIT:label %.*]]
; RV64: [[SCALAR_PH]]:
-; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_BODY]]:
;
; RV32-LABEL: define void @vector_reverse_f32_simplify(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV32-NEXT: [[ENTRY:.*]]:
-; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; RV32-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
-; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
-; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; RV32-NEXT: [[N_RND_UP:%.*]] = add i64 1023, [[TMP6]]
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV32-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
-; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP5]] to i32
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
-; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP10]]
-; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]]
-; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
-; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP8]], i32 [[TMP10]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
-; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
-; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
-; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP17]], 1
+; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
-; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]]
-; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP20]]
-; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]])
-; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP22]], align 4
-; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; RV32-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; RV32-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; RV32-NEXT: [[TMP18:%.*]] = getelementptr float, ptr [[TMP16]], i32 [[TMP17]]
+; RV32-NEXT: [[TMP22:%.*]] = getelementptr float, ptr [[TMP18]], i32 [[TMP20]]
+; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
+; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
+; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
+; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
+; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
-; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
-; RV32-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; RV32-NEXT: br [[EXIT:label %.*]]
; RV32: [[SCALAR_PH]]:
-; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1023, %[[ENTRY]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
@@ -984,7 +984,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
; RV64-NEXT: br label %[[SCALAR_PH]]
; RV64: [[SCALAR_PH]]:
@@ -1036,7 +1036,7 @@ define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
-; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br label %[[SCALAR_PH]]
; RV32: [[SCALAR_PH]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
index 3370e92..770026e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/safe-dep-distance.ll
@@ -10,36 +10,38 @@ target triple = "riscv64"
define void @test(ptr %p) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 200
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
@@ -47,7 +49,7 @@ define void @test(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -86,7 +88,7 @@ define void @test_may_clobber(ptr %p) {
; CHECK-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -101,7 +103,7 @@ define void @test_may_clobber(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -127,36 +129,38 @@ exit:
define void @trivial_due_max_vscale(ptr %p) {
; CHECK-LABEL: @trivial_due_max_vscale(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 8192
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
@@ -164,7 +168,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -190,36 +194,38 @@ exit:
define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-LABEL: @no_high_lmul_or_interleave(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 200, [[TMP6]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 32
+; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1024
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP10]], align 32
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; CHECK-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
@@ -227,7 +233,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; CHECK-NEXT: store i64 [[V]], ptr [[A2]], align 32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -277,7 +283,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 24)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -291,7 +297,7 @@ define void @safe_load_store_distance_not_pow_of_2(i64 %N) {
; CHECK-NEXT: store i16 0, ptr [[GEP_OFF]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 3
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
index e51f6fa..b8d9c28 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-basics.ll
@@ -8,15 +8,14 @@
define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @vector_add(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -24,28 +23,31 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -72,15 +74,14 @@ for.end:
define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-LABEL: @vector_add_i32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[V:%.*]], i64 0
@@ -88,28 +89,31 @@ define void @vector_add_i32(ptr noalias nocapture %a, i32 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <vscale x 4 x i32> [[TMP8]], ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP8]], ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ELEM]], [[V]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -174,15 +178,14 @@ for.end:
define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_store(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -190,28 +193,31 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
; CHECK-NEXT: store i64 [[V]], ptr [[AADDR]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -235,40 +241,43 @@ for.end:
define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %v, i64 %n) {
; CHECK-LABEL: @indexed_load(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP7]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], <vscale x 2 x i64> [[WIDE_LOAD]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP8]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
-; CHECK-NEXT: [[TMP9]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]])
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 2 x i64> [[VEC_PHI]], [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP13]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP12]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP9]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP11]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -276,7 +285,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[SUM_NEXT]] = add i64 [[SUM]], [[ELEM]]
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[SUM_NEXT_LCSSA]]
@@ -303,15 +312,14 @@ for.end:
define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-LABEL: @splat_int(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
@@ -319,24 +327,27 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -358,15 +369,14 @@ for.end:
define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-LABEL: @splat_ptr(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[V:%.*]], i64 0
@@ -374,24 +384,27 @@ define void @splat_ptr(ptr noalias nocapture %a, ptr %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr [[TMP6]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
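
All of the hunks above follow the same shape: the fixed-VF loop (step = vscale x VF, plain wide store, scalar-epilogue handoff through N_VEC) becomes an EVL tail-folded loop. Below is a minimal standalone sketch of the structure the new CHECK lines expect — function and value names are illustrative, not taken from the tests:

; Sketch of the EVL loop: the remaining trip count lives in an AVL phi,
; get.vector.length decides how many lanes to process this iteration, and
; the store is a length-predicated VP intrinsic, so no scalar epilogue runs.
declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)

define void @evl_splat_sketch(ptr %a, i64 %v) {
entry:
  %insert = insertelement <vscale x 2 x i64> poison, i64 %v, i64 0
  %splat = shufflevector <vscale x 2 x i64> %insert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %avl = phi i64 [ 1024, %entry ], [ %avl.next, %vector.body ]
  ; i32 2 = elements per vscale unit, i1 true = scalable VF
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
  %gep = getelementptr inbounds i64, ptr %a, i64 %index
  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %splat, ptr align 8 %gep, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %evl.ext = zext i32 %evl to i64
  %index.next = add nuw i64 %index, %evl.ext
  %avl.next = sub nuw i64 %avl, %evl.ext
  ; the exit compares against the full trip count; the final iteration's
  ; EVL covers whatever tail remains, which is why CMP_N disappears
  %done = icmp eq i64 %index.next, 1024
  br i1 %done, label %exit, label %vector.body

exit:
  ret void
}
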
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 5c6febc..2d2d76a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -5,50 +5,52 @@ define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_icmp(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -75,50 +77,52 @@ define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
; CHECK-LABEL: define i32 @select_fcmp(
; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[COND_LCSSA]]
@@ -145,48 +149,50 @@ define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP20]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 3, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 3, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 3, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -213,48 +219,50 @@ define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64
; CHECK-LABEL: define i32 @select_i32_from_icmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i32 [[A:%.*]], i32 [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP20]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[A]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[A]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -281,48 +289,50 @@ define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
; CHECK-LABEL: define i32 @select_const_i32_from_fcmp(
; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP20:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP20]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP21]])
; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP8]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP21]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 2, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ 2, %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[DOTLCSSA]]
@@ -386,45 +396,53 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-LABEL: define i32 @pred_select_const_i32_from_icmp(
; CHECK-SAME: ptr noalias readonly captures(none) [[SRC1:%.*]], ptr noalias readonly captures(none) [[SRC2:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP11]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP6]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
+; CHECK-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> [[TMP7]], i32 [[TMP17]])
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
-; CHECK-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <vscale x 4 x i1> [[TMP20]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[PREDPHI1]], <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP17]])
+; CHECK-NEXT: [[TMP21:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ 0, %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
@@ -439,7 +457,7 @@ define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1
; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END_LOOPEXIT]]:
; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
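
For the any-of reductions in this file, the plain `or` across the whole vector is replaced by a length-aware merge. A sketch of that single update step, with illustrative names, assuming the documented llvm.vp.merge semantics (lanes below the pivot where the mask is true take on_true; all other lanes take on_false):

; Within the first %evl lanes, lanes where %cmp is true are forced to true;
; every other lane, including the tail past %evl, keeps the accumulator —
; the tail-folded equivalent of `%acc.next = or %acc, %cmp`.
declare <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @anyof_step(<vscale x 4 x i1> %acc, <vscale x 4 x i1> %cmp, i32 %evl) {
  %acc.next = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %acc, i32 %evl)
  ret <vscale x 4 x i1> %acc.next
}
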
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
index 13a4b16..8c804e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
@@ -4,28 +4,16 @@
define void @small_trip_count_min_vlen_128(ptr nocapture %a) nounwind vscale_range(4,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_128(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -49,28 +37,16 @@ exit:
define void @small_trip_count_min_vlen_32(ptr nocapture %a) nounwind vscale_range(1,1024) {
; CHECK-LABEL: @small_trip_count_min_vlen_32(
; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP1]], align 4
-; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[EXIT:%.*]]
-; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 [[IV]]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[LOOP1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP1:%.*]], i32 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[V]], 1
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[IV]], 3
-; CHECK-NEXT: br i1 [[COND]], label [[EXIT]], label [[LOOP1]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND]], label [[EXIT:%.*]], label [[LOOP1]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index df907dc..f0a74fc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -6,45 +6,47 @@
define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_scaled(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
-; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP7]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP12]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
@@ -52,7 +54,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
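
The strided tests swap the masked gather/scatter intrinsics for their VP counterparts, which take an explicit EVL operand instead of a passthru vector and carry alignment as a parameter attribute. A minimal sketch of the load-increment-store step seen above (names illustrative):

declare <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr>, <vscale x 4 x i1>, i32)
declare void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32>, <vscale x 4 x ptr>, <vscale x 4 x i1>, i32)

define void @strided_update(<vscale x 4 x ptr> %addrs, i32 %evl) {
  ; gather only the first %evl lanes; no passthru needed since the
  ; inactive lanes are never observed
  %v = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 %addrs, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %v.inc = add <vscale x 4 x i32> %v, splat (i32 1)
  ; scatter writes back the same %evl lanes to the same addresses
  call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> %v.inc, <vscale x 4 x ptr> align 4 %addrs, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret void
}
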
@@ -77,46 +79,48 @@ exit:
define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_iv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP10]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 64
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 64)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 64, [[TMP5]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 64, [[TMP11]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
@@ -124,7 +128,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -151,55 +155,52 @@ exit:
define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_ptr_iv(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH1:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 8
-; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP18]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x ptr> [[VECTOR_GEP]], i32 0
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP17]], align 4
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
-; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP9]], [[INDEX]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP10]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: br label [[SCALAR_PH]]
+; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[P]], [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[SCALAR_PH1]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P]], [[SCALAR_PH1]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[DONE]], label [[SCALAR_PH]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -225,36 +226,37 @@ exit:
define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NOSTRIDED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP4]], 1
+; NOSTRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
+; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -265,7 +267,7 @@ define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -306,37 +308,38 @@ exit:
define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_iv(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NOSTRIDED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP4]], 1
+; NOSTRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
+; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -348,7 +351,7 @@ define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -429,11 +432,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
@@ -447,26 +446,31 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = sub i64 [[TMP8]], 1
+; NOSTRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP11]]
+; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
+; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
; NOSTRIDED-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P2]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
-; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP14]], ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
+; NOSTRIDED-NEXT: [[TMP13:%.*]] = zext i32 [[TMP16]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
+; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -478,17 +482,13 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_scaled(
; STRIDED-NEXT: entry:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 80, i64 [[TMP1]])
-; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; STRIDED: vector.scevcheck:
; STRIDED-NEXT: [[TMP24:%.*]] = shl i64 [[STRIDE:%.*]], 2
; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], -4
@@ -539,8 +539,10 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
-; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; STRIDED-NEXT: [[TMP42:%.*]] = sub i64 [[TMP9]], 1
+; STRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP42]]
+; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
+; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
@@ -548,28 +550,32 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP12]], splat (i64 1)
; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP11]]
-; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
-; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[TMP43:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; STRIDED-NEXT: [[TMP44:%.*]] = zext i32 [[TMP43]] to i64
+; STRIDED-NEXT: [[TMP45:%.*]] = mul i64 1, [[TMP44]]
+; STRIDED-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP45]], i64 0
+; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META9:![0-9]+]]
; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]]
-; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP43]]), !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
+; STRIDED-NEXT: [[TMP46:%.*]] = zext i32 [[TMP43]] to i64
+; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP46]], [[INDEX]]
+; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP46]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; STRIDED-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; STRIDED: middle.block:
-; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -581,7 +587,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
@@ -607,37 +613,38 @@ exit:
define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_iv(
; NOSTRIDED-NEXT: entry:
-; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
-; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; NOSTRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
-; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
-; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NOSTRIDED-NEXT: [[TMP2:%.*]] = sub i64 [[TMP4]], 1
+; NOSTRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP2]]
+; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[INDEX]]
-; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
-; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP8]], align 4
-; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP8]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
+; NOSTRIDED-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; NOSTRIDED-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NOSTRIDED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; NOSTRIDED: middle.block:
-; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NOSTRIDED-NEXT: br label [[EXIT:%.*]]
; NOSTRIDED: scalar.ph:
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
-; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -650,7 +657,7 @@ define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
+; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
@@ -692,7 +699,6 @@ exit:
ret void
}
-
define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_ptr_iv(
; NOSTRIDED-NEXT: entry:
@@ -714,11 +720,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
;
; STRIDED-LABEL: @double_stride_ptr_iv(
; STRIDED-NEXT: entry:
-; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; STRIDED-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
-; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
-; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; STRIDED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 1023
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
@@ -740,19 +742,18 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
-; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
-; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; STRIDED-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], 1
+; STRIDED-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP10]]
+; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP9]]
+; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 4
-; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
-; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
-; STRIDED-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP11]]
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
+; STRIDED-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -761,23 +762,27 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
-; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[VECTOR_GEP7]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META15:![0-9]+]]
+; STRIDED-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.vp.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> align 4 [[VECTOR_GEP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META16:![0-9]+]]
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
-; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
-; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
-; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
+; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> align 4 [[VECTOR_GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
+; STRIDED-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP16]], [[INDEX]]
+; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
+; STRIDED-NEXT: [[TMP20:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP20]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
+; STRIDED-NEXT: [[TMP22:%.*]] = zext i32 [[TMP14]] to i64
+; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP22]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]]
-; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; STRIDED: middle.block:
-; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; STRIDED-NEXT: br label [[EXIT:%.*]]
; STRIDED: scalar.ph:
-; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
-; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END7]], [[MIDDLE_BLOCK]] ], [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
+; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
@@ -790,7 +795,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
-; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
+; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 034b767..68d55d0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -8,15 +8,14 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_zext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 97, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -24,24 +23,27 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = zext i16 [[L]] to i64
@@ -51,7 +53,7 @@ define void @test_pr98413_zext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -79,15 +81,14 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-LABEL: define void @test_pr98413_sext_removed(
; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 97, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 97, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 97, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 97, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[X]], i64 0
@@ -95,24 +96,27 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 8 x i64> [[BROADCAST_SPLAT]] to <vscale x 8 x i8>
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 97, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[TMP7]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 8 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
; CHECK-NEXT: [[TMP10:%.*]] = trunc <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i8>
; CHECK-NEXT: [[TMP11:%.*]] = and <vscale x 8 x i8> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP7]]
-; CHECK-NEXT: store <vscale x 8 x i8> [[TMP11]], ptr [[TMP12]], align 1
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[TMP5]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP11]], ptr align 1 [[TMP12]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[TMP7]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 97
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 97, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i16, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC]], align 8
; CHECK-NEXT: [[EXT_L:%.*]] = sext i16 [[L]] to i64
@@ -122,7 +126,7 @@ define void @test_pr98413_sext_removed(ptr %src, ptr noalias %dst, i64 %x) {
; CHECK-NEXT: store i8 [[TRUNC_AND]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV]], 96
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -172,7 +176,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
-; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
@@ -259,15 +263,14 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[V]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP2:%.*]] = mul nuw i64 [[TMP1]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[TMP4]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP0]], [[TMP13]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP4]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[N]], i64 0
@@ -284,20 +287,23 @@ define void @icmp_only_first_op_truncated(ptr noalias %dst, i32 %x, i64 %N, i64
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x double> poison)
-; CHECK-NEXT: call void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> [[BROADCAST_SPLAT6]], i32 8, <vscale x 2 x i1> [[TMP8]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0(<vscale x 2 x double> [[WIDE_MASKED_GATHER]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT6]], <vscale x 2 x i1> [[TMP8]], i32 [[TMP14]])
+; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[T1:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[T1]], [[T]]
; CHECK-NEXT: br i1 [[C]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
@@ -344,15 +350,15 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" }
attributes #1 = { "target-features"="+64bit,+v" }
;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META7:![0-9]+]], [[META2]]}
-; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
+; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index d97e93d..722fe5e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -10,42 +10,44 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; SCALABLE-LABEL: define void @uniform_load(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP6]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: [[TMP7:%.*]] = load i64, ptr [[B]], align 8
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP7]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -168,7 +170,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
@@ -182,7 +184,7 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ], [ [[TMP6]], %[[MIDDLE_BLOCK]] ]
; SCALABLE-NEXT: ret i64 [[V_LCSSA]]
@@ -286,15 +288,14 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-LABEL: define void @conditional_uniform_load(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP14:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP14]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[B]], i64 0
@@ -302,30 +303,39 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; SCALABLE-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 1)
; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]]
-; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
-; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP17:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP17]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP17]] to i64
+; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]]
+; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
+; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP18:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; SCALABLE-NEXT: [[TMP11:%.*]] = icmp ult <vscale x 4 x i32> [[TMP18]], [[BROADCAST_SPLAT4]]
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 4 x i64> [[VEC_IND]], splat (i64 10)
-; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 8, <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> poison)
-; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer
+; SCALABLE-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[TMP11]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> zeroinitializer
+; SCALABLE-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> align 8 [[BROADCAST_SPLAT]], <vscale x 4 x i1> [[TMP10]], i32 [[TMP17]])
+; SCALABLE-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i64> [[WIDE_MASKED_GATHER]], <vscale x 4 x i64> zeroinitializer
; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 4 x i64> [[PREDPHI]], ptr [[TMP12]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP17]])
+; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP17]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; SCALABLE-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
; SCALABLE: [[DO_LOAD]]:
@@ -337,7 +347,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -482,42 +492,44 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; SCALABLE-LABEL: define void @uniform_load_unaligned(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP7]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: [[TMP6:%.*]] = load i64, ptr [[B]], align 1
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP8]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -619,42 +631,44 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; SCALABLE-LABEL: define void @uniform_store(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP6]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -756,54 +770,55 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; SCALABLE-LABEL: define void @uniform_store_of_loop_varying(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP9:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP9]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[B]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; SCALABLE-NEXT: [[TMP13:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
+; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP13]]
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; SCALABLE-NEXT: [[TMP10:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX:%.*]] = mul i64 1, [[TMP8]]
; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i64 0
; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; SCALABLE-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP6]]
-; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
-; SCALABLE-NEXT: [[TMP9:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP8]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
-; SCALABLE-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1
-; SCALABLE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32()
-; SCALABLE-NEXT: [[TMP13:%.*]] = mul nuw i32 [[TMP12]], 2
-; SCALABLE-NEXT: [[TMP14:%.*]] = sub i32 [[TMP13]], 1
-; SCALABLE-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i64> [[TMP9]], i32 [[TMP14]]
-; SCALABLE-NEXT: store i64 [[TMP15]], ptr [[B]], align 8
+; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[VEC_IND]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT1]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP16]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[TMP10]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[DOTSPLAT]]
+; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -920,15 +935,14 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-LABEL: define void @conditional_uniform_store(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP11:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP11]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
@@ -938,29 +952,33 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; SCALABLE-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
; SCALABLE-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP5]]
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP14]]
+; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
+; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: [[TMP10:%.*]] = icmp ugt <vscale x 2 x i64> [[VEC_IND]], splat (i64 10)
-; SCALABLE-NEXT: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> [[BROADCAST_SPLAT2]], i32 8, <vscale x 2 x i1> [[TMP10]])
+; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], <vscale x 2 x ptr> align 8 [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[TMP10]], i32 [[TMP7]])
; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr [[TMP12]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
; SCALABLE: [[DO_STORE]]:
@@ -971,7 +989,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
@@ -1109,42 +1127,44 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; SCALABLE-LABEL: define void @uniform_store_unaligned(
; SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; SCALABLE-NEXT: [[ENTRY:.*]]:
-; SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
-; SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; SCALABLE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; SCALABLE: [[VECTOR_PH]]:
; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; SCALABLE-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], 1
+; SCALABLE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP6]]
+; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP3]]
+; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; SCALABLE: [[VECTOR_BODY]]:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; SCALABLE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
-; SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP7]], align 8
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
+; SCALABLE-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
+; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP10]], [[INDEX]]
+; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP10]]
+; SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; SCALABLE: [[MIDDLE_BLOCK]]:
-; SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; SCALABLE-NEXT: br label %[[FOR_END:.*]]
; SCALABLE: [[SCALAR_PH]]:
-; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; SCALABLE: [[FOR_BODY]]:
-; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; SCALABLE: [[FOR_END]]:
; SCALABLE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
index bda9839..31501f6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll
@@ -17,17 +17,28 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT2]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 100, [[TMP9]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP8]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP10]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[DST]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x ptr> [[BROADCAST_SPLATINSERT]], <vscale x 4 x ptr> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT4]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[BROADCAST_SPLAT5]], <2 x ptr> [[BROADCAST_SPLAT3]], i32 4, <2 x i1> splat (i1 true)), !alias.scope [[META3]], !noalias [[META0]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP6]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT3]], <vscale x 4 x ptr> align 4 [[BROADCAST_SPLAT]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -41,7 +52,7 @@ define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) {
; CHECK-NEXT: store i32 [[DOTPRE]], ptr [[DST]], align 4
; CHECK-NEXT: [[TMP3]] = add nuw i64 [[TMP2]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[TMP3]], 100
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -65,8 +76,9 @@ exit:
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; CHECK: [[META3]] = !{[[META4:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
+; CHECK: [[META7]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
+; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
index d7c9ce4..4669522 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-riscv-vector-reverse.ll
@@ -10,8 +10,7 @@
; RUN: -disable-output < %s 2>&1 | FileCheck %s
define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
-; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF>=1' {
-; CHECK-NEXT: Live-in vp<[[VF:%.+]]> = VF
+; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' {
; CHECK-NEXT: Live-in vp<[[VFxUF:%.+]]> = VF * UF
; CHECK-NEXT: Live-in vp<[[VTC:%.+]]> = vector-trip-count
; CHECK-NEXT: vp<[[OTC:%.+]]> = original trip-count
@@ -21,41 +20,42 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
-; CHECK-NEXT: vp<[[RESUME_IV_A:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
-; CHECK-NEXT: vp<[[RESUME_IV_B:%.+]]> = DERIVED-IV ir<%n> + vp<[[VTC]]> * ir<-1>
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[INDUCTION:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
-; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[INDUCTION]]> * ir<-1>
-; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[VF]]>
+; CHECK-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%.+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
+; CHECK-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ vp<[[OTC]]>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
+; CHECK-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
+; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[EVL_PHI]]> * ir<-1>
+; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>, vp<[[EVL]]>
; CHECK-NEXT: CLONE ir<[[IDX:%.+]]> = add nsw vp<[[SCALAR_STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<[[IDX_PROM:%.+]]> = zext ir<[[IDX]]>
; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_B:%.+]]> = getelementptr inbounds ir<[[B:%.+]]>, ir<[[IDX_PROM]]>
-; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_B]]>, vp<[[VF]]>
-; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = load vp<[[VEC_END_PTR_B]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_B:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_B]]>, vp<[[EVL]]>
+; CHECK-NEXT: WIDEN ir<[[VAL_B:%.+]]> = vp.load vp<[[VEC_END_PTR_B]]>, vp<[[EVL]]>
; CHECK-NEXT: WIDEN ir<[[ADD_RESULT:%.+]]> = add ir<[[VAL_B]]>, ir<1>
; CHECK-NEXT: CLONE ir<[[ARRAY_IDX_A:%.+]]> = getelementptr inbounds ir<[[A:%.+]]>, ir<[[IDX_PROM]]>
-; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer inbounds ir<[[ARRAY_IDX_A]]>, vp<[[VF]]>
-; CHECK-NEXT: WIDEN store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>
-; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add nuw vp<[[INDUCTION]]>, vp<[[VFxUF]]>
+; CHECK-NEXT: vp<[[VEC_END_PTR_A:%.+]]> = vector-end-pointer ir<[[ARRAY_IDX_A]]>, vp<[[EVL]]>
+; CHECK-NEXT: WIDEN vp.store vp<[[VEC_END_PTR_A]]>, ir<[[ADD_RESULT]]>, vp<[[EVL]]>
+; CHECK-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[EVL]]>, vp<[[EVL_PHI]]>
+; CHECK-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[EVL]]>
+; CHECK-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDUCTION]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq vp<[[OTC]]>, vp<[[VTC]]>
-; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
-; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>, scalar.ph
+; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup>
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[RESUME_IV_A]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
-; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[RESUME_IV_B]]>, middle.block ], [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ ir<%n>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ ir<%n>, ir-bb<entry> ]
; CHECK-NEXT: Successor(s): ir-bb<for.body>
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
index 13c443c..b4eebcc 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-03.ll
@@ -12,7 +12,7 @@
define noundef i32 @fun(i32 %argc, ptr nocapture readnone %argv) {
entry:
%l_4774.i = alloca [4 x [2 x i128]], align 8
- call void @llvm.lifetime.start.p0(i64 128, ptr nonnull %l_4774.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %l_4774.i)
br label %for.cond4.preheader.i
for.cond4.preheader.i: ; preds = %for.cond4.preheader.i, %entry
@@ -31,13 +31,13 @@ func_1.exit: ; preds = %for.cond4.preheader
%cmp200.i = icmp ne i128 %0, 0
%conv202.i = zext i1 %cmp200.i to i64
%call203.i = tail call i64 @safe_sub_func_int64_t_s_s(i64 noundef %conv202.i, i64 noundef 9139899272418802852)
- call void @llvm.lifetime.end.p0(i64 128, ptr nonnull %l_4774.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %l_4774.i)
br label %for.cond
for.cond: ; preds = %for.cond, %func_1.exit
br label %for.cond
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare dso_local i64 @safe_sub_func_int64_t_s_s(i64, i64)
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
index 639fb86..6fc7ed2 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr35432.ll
@@ -13,9 +13,9 @@ define i32 @main(ptr %ptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[S:%.*]] = alloca i16, align 2
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @goo(ptr nonnull [[I]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
; CHECK-NEXT: [[STOREMERGE6:%.*]] = trunc i32 [[TMP0]] to i16
@@ -111,16 +111,16 @@ define i32 @main(ptr %ptr) {
; CHECK-NEXT: br label [[FOR_END12]]
; CHECK: for.end12:
; CHECK-NEXT: [[CALL13:%.*]] = call i32 (ptr, ...) @foo(ptr nonnull [[S]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[S]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[I]])
; CHECK-NEXT: ret i32 0
;
entry:
%i = alloca i32, align 4
%s = alloca i16, align 2
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %i) #3
store i32 0, ptr %i, align 4
- call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %s) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %s) #3
%call = call i32 (ptr, ...) @goo(ptr nonnull %i) #3
%0 = load i32, ptr %i, align 4
%storemerge6 = trunc i32 %0 to i16
@@ -174,17 +174,17 @@ for.cond.for.end12_crit_edge: ; preds = %for.inc9
for.end12: ; preds = %for.cond.for.end12_crit_edge, %entry
%call13 = call i32 (ptr, ...) @foo(ptr nonnull %s) #3
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %s) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %s) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %i) #3
ret i32 0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @goo(...) local_unnamed_addr #2
declare i32 @foo(...) local_unnamed_addr #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/LoopVectorize/lifetime.ll b/llvm/test/Transforms/LoopVectorize/lifetime.ll
index 3dd41b5..61e8635 100644
--- a/llvm/test/Transforms/LoopVectorize/lifetime.ll
+++ b/llvm/test/Transforms/LoopVectorize/lifetime.ll
@@ -12,23 +12,23 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @test(ptr %d) {
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
@@ -40,26 +40,26 @@ for.end:
define void @testbitcast(ptr %d) {
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
index 7aac9d1..9cbf6b7 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-lifetime.ll
@@ -10,7 +10,7 @@ define void @test(ptr %d) {
; CHECK-SAME: (ptr [[D:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [1024 x i32], align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 128, [[TMP1]]
@@ -25,10 +25,10 @@ define void @test(ptr %d) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -40,39 +40,39 @@ define void @test(ptr %d) {
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [1024 x i32], align 16
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%0 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
br i1 %exitcond, label %for.body, label %for.end, !llvm.loop !0
for.end:
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
ret void
}
@@ -95,10 +95,10 @@ define void @testloopvariant(ptr %d) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDEX]]
; CHECK-NEXT: store <vscale x 2 x i32> splat (i32 100), ptr [[TMP6]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -111,11 +111,11 @@ define void @testloopvariant(ptr %d) {
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr [1024 x i32], ptr [[ARR]], i32 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ARR]])
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[D]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i32 100, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4096, ptr [[ARR]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ARR]])
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[LFTR_WIDEIV]], 128
@@ -130,11 +130,11 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = getelementptr [1024 x i32], ptr %arr, i32 0, i64 %indvars.iv
- call void @llvm.lifetime.end.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.end.p0(ptr %arr) #1
%arrayidx = getelementptr inbounds i32, ptr %d, i64 %indvars.iv
%1 = load i32, ptr %arrayidx, align 8
store i32 100, ptr %arrayidx, align 8
- call void @llvm.lifetime.start.p0(i64 4096, ptr %arr) #1
+ call void @llvm.lifetime.start.p0(ptr %arr) #1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 128
@@ -144,9 +144,9 @@ for.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
index 7cc8458..612c96c 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-lifetime-ends.ll
@@ -73,7 +73,7 @@ define void @lifetime_for_first_arg_before_multiply(ptr noalias %B, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -81,7 +81,7 @@ entry:
call void @init(ptr %A)
%a = load <4 x double>, ptr %A, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -154,7 +154,7 @@ define void @lifetime_for_second_arg_before_multiply(ptr noalias %A, ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
@@ -162,7 +162,7 @@ entry:
call void @init(ptr %B)
%a = load <4 x double>, ptr %A, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -236,7 +236,7 @@ define void @lifetime_for_first_arg_before_multiply_load_from_offset(ptr noalias
; CHECK-NEXT: store <2 x double> [[TMP13]], ptr [[TMP26]], align 8
; CHECK-NEXT: [[VEC_GEP28:%.*]] = getelementptr double, ptr [[TMP26]], i64 2
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
entry:
@@ -245,7 +245,7 @@ entry:
%gep.8 = getelementptr i8, ptr %A, i64 8
%a = load <4 x double>, ptr %gep.8, align 8
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
%c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
store <4 x double> %c, ptr %C, align 8
ret void
@@ -332,7 +332,7 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
br label %exit
exit:
@@ -422,7 +422,7 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
br label %exit
exit:
@@ -442,8 +442,8 @@ define void @multiple_unrelated_lifetimes(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: call void @init(ptr [[B]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[ALLOC_2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[ALLOC_2]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -522,10 +522,10 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.1)
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %alloc.2)
+ call void @llvm.lifetime.end(ptr %B)
+ call void @llvm.lifetime.end(ptr %alloc.1)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %alloc.2)
br label %exit
exit:
@@ -607,8 +607,8 @@ define void @lifetimes_for_args_in_different_blocks(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[VEC_GEP28]], align 8
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: ret void
;
entry:
@@ -626,8 +626,8 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
ret void
}
@@ -640,8 +640,8 @@ define void @lifetimes_for_args_in_different_blocks2(ptr noalias %C, i1 %c.0) {
; CHECK-NEXT: call void @init(ptr [[B]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[THEN:%.*]], label [[EXIT:%.*]]
; CHECK: then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[B]])
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[A]], i64 0
@@ -716,8 +716,8 @@ entry:
br i1 %c.0, label %then, label %exit
then:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %A)
+ call void @llvm.lifetime.end(ptr %B)
br label %exit
exit:
@@ -809,7 +809,7 @@ entry:
call void @init(ptr %A)
call void @init(ptr %B)
%a = load <4 x double>, ptr %A, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
br i1 %c.0, label %then, label %exit
then:
@@ -819,7 +819,7 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
ret void
}
@@ -904,7 +904,7 @@ entry:
call void @init(ptr %A)
call void @init(ptr %B)
%b = load <4 x double>, ptr %B, align 8
- call void @llvm.lifetime.end(i64 -1, ptr %B)
+ call void @llvm.lifetime.end(ptr %B)
br i1 %c.0, label %then, label %exit
then:
@@ -914,11 +914,11 @@ then:
br label %exit
exit:
- call void @llvm.lifetime.end(i64 -1, ptr %A)
+ call void @llvm.lifetime.end(ptr %A)
ret void
}
declare void @init(ptr)
-declare void @llvm.lifetime.end(i64, ptr)
+declare void @llvm.lifetime.end(ptr)
declare <4 x double> @llvm.matrix.multiply(<4 x double>, <4 x double>, i32, i32, i32)
diff --git a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
index 87ff922..f7e84274 100644
--- a/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
+++ b/llvm/test/Transforms/Mem2Reg/alloca_addrspace.ll
@@ -10,6 +10,6 @@ define amdgpu_kernel void @addressspace_alloca() {
; CHECK-NEXT: ret void
;
%alloca = alloca i8, align 8, addrspace(5)
- call void @llvm.lifetime.start(i64 2, ptr addrspace(5) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(5) %alloca)
ret void
}
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
index d4bc097..a876319 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-droppable.ll
@@ -2,8 +2,8 @@
; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s
declare void @llvm.assume(i1)
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @positive_assume_uses(ptr %arg) {
; CHECK-LABEL: @positive_assume_uses(
@@ -54,10 +54,10 @@ define void @positive_gep_assume_uses() {
;
%A = alloca {i8, i16}
%B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)]
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)]
ret void
}
@@ -70,10 +70,10 @@ define void @positive_mixed_assume_uses() {
; CHECK-NEXT: ret void
;
%A = alloca i8
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)]
store i8 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
ret void
diff --git a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
index bcc9693..510fb2b 100644
--- a/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
+++ b/llvm/test/Transforms/Mem2Reg/ignore-lifetime.ll
@@ -1,15 +1,15 @@
; RUN: opt -passes=mem2reg -S -o - < %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @test1() {
; CHECK: test1
; CHECK-NOT: alloca
%A = alloca i32
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
store i32 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
ret void
}
@@ -17,8 +17,8 @@ define void @test2() {
; CHECK: test2
; CHECK-NOT: alloca
%A = alloca {i8, i16}
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
ret void
}
diff --git a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
index 601498e..a0c0e9f 100644
--- a/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/callslot_badaa.ll
@@ -5,7 +5,7 @@ declare void @use(ptr)
; Make sure callslot optimization merges alias.scope metadata correctly when it merges instructions.
; Merging here naively generates:
; call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false), !alias.scope !3
-; call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !0
+; call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !0
; ...
; !0 = !{!1}
; !1 = distinct !{!1, !2, !"callee1: %a"}
@@ -20,18 +20,18 @@ define i8 @test(i8 %input) {
%src = alloca i8
; NOTE: we're matching the full line and looking for the lack of !alias.scope here
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %src, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.start.p0(ptr nonnull %src), !noalias !3
store i8 %input, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp, ptr align 8 %src, i64 1, i1 false), !alias.scope !0
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %src), !noalias !3
+ call void @llvm.lifetime.end.p0(ptr nonnull %src), !noalias !3
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %dst, ptr align 8 %tmp, i64 1, i1 false), !alias.scope !3
%ret_value = load i8, ptr %dst
call void @use(ptr %src)
ret i8 %ret_value
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
!0 = !{!1}
diff --git a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
index 47c4358..c08f60a 100644
--- a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
+++ b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
@@ -5,8 +5,8 @@ target datalayout = "e"
declare void @foo(ptr)
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Check that the transformation isn't applied if the called function can
; capture the pointer argument (i.e. the nocapture attribute isn't present)
@@ -51,18 +51,18 @@ define void @test_lifetime_end() {
; CHECK-LABEL: define {{[^@]+}}@test_lifetime_end() {
; CHECK-NEXT: [[PTR1:%.*]] = alloca i8, align 1
; CHECK-NEXT: [[PTR2:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTR2]])
; CHECK-NEXT: call void @foo(ptr [[PTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[PTR2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTR2]])
; CHECK-NEXT: call void @foo(ptr [[PTR1]])
; CHECK-NEXT: ret void
;
%ptr1 = alloca i8
%ptr2 = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.start.p0(ptr %ptr2)
call void @foo(ptr %ptr2)
call void @llvm.memcpy.p0.p0.i32(ptr %ptr1, ptr %ptr2, i32 1, i1 false)
- call void @llvm.lifetime.end.p0(i64 1, ptr %ptr2)
+ call void @llvm.lifetime.end.p0(ptr %ptr2)
call void @foo(ptr %ptr1)
ret void
}
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
index 0626f09..06d9434 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime-missing.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-grtev4-linux-gnu"
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
define void @test() {
@@ -26,7 +26,7 @@ entry:
%agg.tmp.sroa.14 = alloca [20 x i8], align 4
%agg.tmp.sroa.14.128.sroa_idx = getelementptr i8, ptr %agg.tmp.sroa.14, i64 4
call void @llvm.memset.p0.i64(ptr %agg.tmp.sroa.14.128.sroa_idx, i8 0, i64 1, i1 false)
- call void @llvm.lifetime.start.p0(i64 20, ptr %agg.tmp3.sroa.35)
+ call void @llvm.lifetime.start.p0(ptr %agg.tmp3.sroa.35)
call void @llvm.memcpy.p0.p0.i64(ptr %agg.tmp3.sroa.35, ptr %agg.tmp.sroa.14, i64 20, i1 false)
%agg.tmp3.sroa.35.128.sroa_idx = getelementptr i8, ptr %agg.tmp3.sroa.35, i64 4
call void @llvm.memcpy.p0.p0.i64(ptr inttoptr (i64 4 to ptr), ptr %agg.tmp3.sroa.35.128.sroa_idx, i64 1, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/lifetime.ll b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
index e9fc06b..4eab12a 100644
--- a/llvm/test/Transforms/MemCpyOpt/lifetime.ll
+++ b/llvm/test/Transforms/MemCpyOpt/lifetime.ll
@@ -5,46 +5,46 @@
; @llvm.lifetime.start and @llvm.memcpy.
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @call_slot(ptr nocapture dereferenceable(16) %arg1) {
; CHECK-LABEL: @call_slot(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca [8 x i8], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[TMP]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 7
; CHECK-NEXT: store i8 0, ptr [[TMP10]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[TMP]])
; CHECK-NEXT: ret void
;
bb:
%tmp = alloca [8 x i8], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
%tmp10 = getelementptr inbounds i8, ptr %tmp, i64 7
store i8 0, ptr %tmp10, align 1
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %arg1, ptr align 8 %tmp, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
ret void
}
define void @memcpy_memcpy_across_lifetime(ptr noalias %p1, ptr noalias %p2, ptr noalias %p3) {
; CHECK-LABEL: @memcpy_memcpy_across_lifetime(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A]], ptr [[P1:%.*]], i64 16, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P1]], ptr [[P2:%.*]], i64 16, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P2]], ptr [[A]], i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[P3:%.*]], ptr [[P2]], i64 16, i1 false)
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr %p1, i64 16, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr %p2, i64 16, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr %p2, i64 16, i1 false)
ret void
}
@@ -55,18 +55,18 @@ define i32 @call_slot_move_lifetime_start() {
; CHECK-LABEL: @call_slot_move_lifetime_start(
; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @call(ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
%tmp = alloca i32
%dst = alloca i32
call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
@@ -76,20 +76,20 @@ define i32 @call_slot_two_lifetime_starts() {
; CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @call(ptr [[TMP]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
%tmp = alloca i32
%dst = alloca i32
call void @call(ptr %tmp)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
@@ -100,9 +100,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
; CHECK-NEXT: [[DST:%.*]] = alloca i32, align 4
; CHECK-NEXT: call void @call(ptr [[TMP]])
; CHECK-NEXT: store i32 0, ptr [[DST]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[DST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[TMP]], i64 4, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[DST]])
; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[DST]], align 4
; CHECK-NEXT: ret i32 [[V]]
;
@@ -110,9 +110,9 @@ define i32 @call_slot_clobber_before_lifetime_start() {
%dst = alloca i32
call void @call(ptr %tmp)
store i32 0, ptr %dst
- call void @llvm.lifetime.start.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.start.p0(ptr %dst)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %tmp, i64 4, i1 false)
- call void @llvm.lifetime.end.p0(i64 4, ptr %dst)
+ call void @llvm.lifetime.end.p0(ptr %dst)
%v = load i32, ptr %dst
ret i32 %v
}
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
index 383040c..e1b32cd 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-byval-forwarding-clobbers.ll
@@ -7,8 +7,8 @@ declare i1 @check(ptr readonly byval(i64) align 8) readonly argmemonly
declare void @clobber(ptr) argmemonly
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
; %a.2's lifetime ends before the call to @check. We must remove the call to
@@ -25,11 +25,11 @@ define i1 @alloca_forwarding_lifetime_end_clobber() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.end.p0(ptr %a.2)
;call void @clobber(ptr %a.2)
%call = call i1 @check(ptr byval(i64) align 8 %a.1)
ret i1 %call
@@ -42,7 +42,7 @@ define i1 @alloca_forwarding_call_clobber() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -53,7 +53,7 @@ define i1 @alloca_forwarding_call_clobber() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -67,7 +67,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A_1:%.*]] = alloca i64, align 8
; CHECK-NEXT: [[A_2:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A_2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A_2]])
; CHECK-NEXT: call void @init(ptr sret(i64) align 8 [[A_2]])
; CHECK-NEXT: store i8 0, ptr [[A_2]], align 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[A_1]], ptr [[A_2]], i64 8, i1 false)
@@ -78,7 +78,7 @@ define i1 @alloca_forwarding_call_clobber_after() {
entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
@@ -102,7 +102,7 @@ entry:
%a.1 = alloca i64, align 8
%a.2 = alloca i64, align 8
%a.3 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %a.2)
+ call void @llvm.lifetime.start.p0(ptr %a.2)
call void @init(ptr sret(i64) align 8 %a.2)
store i8 0, ptr %a.2
call void @llvm.memcpy.p0.p0.i64(ptr %a.1, ptr %a.2, i64 8, i1 false)
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
index ba6faf3..5e81c0d 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-gep-modification.ll
@@ -3,8 +3,8 @@
%struct.MaskedType = type { i8, i8 }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
declare void @MaskedFunction1(ptr, ptr addrspace(1))
declare void @MaskedFunction2(ptr, ptr)
@@ -13,11 +13,11 @@ define i8 @test_gep_not_modified(ptr %in0, ptr %in1) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[ADDRSPACECAST:%.*]] = addrspacecast ptr [[PTRALLOC]] to ptr addrspace(1)
; CHECK-NEXT: call void @MaskedFunction1(ptr [[IN1:%.*]], ptr addrspace(1) [[ADDRSPACECAST]])
; CHECK-NEXT: [[LOAD1:%.*]] = load i8, ptr [[PTRALLOC]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
; CHECK-NEXT: store i8 [[LOAD1]], ptr [[GETELEMPTR1]], align 1
; CHECK-NEXT: ret i8 0
@@ -25,11 +25,11 @@ define i8 @test_gep_not_modified(ptr %in0, ptr %in1) {
entry:
%funcAlloc = alloca %struct.MaskedType, align 4
%ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
%addrspaceCast = addrspacecast ptr %ptrAlloc to ptr addrspace(1)
call void @MaskedFunction1(ptr %in1, ptr addrspace(1) %addrspaceCast)
%load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
%getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
store i8 %load1, ptr %getElemPtr1, align 1
ret i8 0
@@ -40,19 +40,19 @@ define i8 @test_gep_modified(ptr %in0, ptr %in1) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FUNCALLOC:%.*]] = alloca [[STRUCT_MASKEDTYPE:%.*]], align 4
; CHECK-NEXT: [[PTRALLOC:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: [[GETELEMPTR1:%.*]] = getelementptr inbounds [[STRUCT_MASKEDTYPE]], ptr [[FUNCALLOC]], i32 0, i32 1
; CHECK-NEXT: call void @MaskedFunction2(ptr [[IN1:%.*]], ptr [[GETELEMPTR1]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[PTRALLOC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[PTRALLOC]])
; CHECK-NEXT: ret i8 0
;
entry:
%funcAlloc = alloca %struct.MaskedType, align 4
%ptrAlloc = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.start.p0(ptr %ptrAlloc) #0
call void @MaskedFunction2(ptr %in1, ptr %ptrAlloc)
%load1 = load i8, ptr %ptrAlloc, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %ptrAlloc) #0
+ call void @llvm.lifetime.end.p0(ptr %ptrAlloc) #0
%getElemPtr1 = getelementptr inbounds %struct.MaskedType, ptr %funcAlloc, i32 0, i32 1
store i8 %load1, ptr %getElemPtr1, align 1
ret i8 0
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
index 1771fe6..7a7f8e1 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-to-memset-with-lifetimes.ll
@@ -12,10 +12,10 @@ define void @foo(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
;
entry-block:
%a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -24,28 +24,28 @@ define void @bar(ptr noalias nocapture sret([8 x i64]) dereferenceable(64) %sret
; CHECK-LABEL: @bar(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: [[A:%.*]] = alloca [8 x i64], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[A]], i8 0, i64 64, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[SRET:%.*]], i8 0, i64 64, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(32) [[A]], i8 42, i64 32, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(64) [[OUT:%.*]], ptr noundef nonnull align 8 dereferenceable(64) [[A]], i64 64, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]])
; CHECK-NEXT: ret void
;
entry-block:
%a = alloca [8 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 64, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %sret, ptr align 8 %a, i64 64, i1 false)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 42, i64 32, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %out, ptr align 8 %a, i64 64, i1 false)
- call void @llvm.lifetime.end.p0(i64 64, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
index 84253dc..6e28811 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -29,11 +29,11 @@ define i32 @test1(ptr nocapture %foobie) nounwind noinline ssp uwtable {
define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uwtable {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[IN:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[IN]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[IN]])
; CHECK-NEXT: ret void
;
%in = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %in)
+ call void @llvm.lifetime.start.p0(ptr %in)
call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 8, i1 false)
ret void
}
@@ -42,12 +42,12 @@ define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uw
define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
; CHECK-LABEL: @test_lifetime_may_alias(
; CHECK-NEXT: [[LIFETIME:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[LIFETIME]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[LIFETIME]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 8, i1 false)
; CHECK-NEXT: ret void
;
%lifetime = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %lifetime)
+ call void @llvm.lifetime.start.p0(ptr %lifetime)
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 8, i1 false)
ret void
}
@@ -56,12 +56,12 @@ define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
; CHECK-LABEL: @test_lifetime_partial_alias_1(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%gep = getelementptr i8, ptr %a, i64 8
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 8, i1 false)
ret void
@@ -71,12 +71,12 @@ define void @test_lifetime_partial_alias_1(ptr noalias %dst) {
define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
; CHECK-LABEL: @test_lifetime_partial_alias_2(
; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
; CHECK-NEXT: ret void
;
%a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
%gep = getelementptr i8, ptr %a, i64 8
call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 16, i1 false)
ret void
@@ -84,4 +84,4 @@ define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
index 343f951..2575d58 100644
--- a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
@@ -23,17 +23,17 @@ define void @test_alloca(ptr %result) {
define void @test_alloca_with_lifetimes(ptr %result) {
; CHECK-LABEL: @test_alloca_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A]], i8 0, i64 12, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr [[RESULT:%.*]], i8 0, i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca %T, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 12, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr %result, ptr align 8 %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -201,5 +201,5 @@ declare void @free(ptr)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/MemCpyOpt/pr29105.ll b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
index d47bddd..f4538b9 100644
--- a/llvm/test/Transforms/MemCpyOpt/pr29105.ll
+++ b/llvm/test/Transforms/MemCpyOpt/pr29105.ll
@@ -7,34 +7,34 @@ define void @baz() unnamed_addr #0 {
; CHECK-LABEL: @baz(
; CHECK-NEXT: entry-block:
; CHECK-NEXT: [[TMP2:%.*]] = alloca [[FOO:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP2]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 8 dereferenceable(16384) [[TMP2]], i8 0, i64 16384, i1 false)
; CHECK-NEXT: call void @bar(ptr noalias nonnull captures(none) dereferenceable(16384) [[TMP2]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 16384, ptr nonnull [[TMP2]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP2]])
; CHECK-NEXT: ret void
;
entry-block:
%x.sroa.0 = alloca [2048 x i64], align 8
%tmp0 = alloca [2048 x i64], align 8
%tmp2 = alloca %Foo, align 8
- call void @llvm.lifetime.start.p0(i64 16384, ptr %x.sroa.0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %x.sroa.0)
+ call void @llvm.lifetime.start.p0(ptr %tmp0)
call void @llvm.memset.p0.i64(ptr align 8 %tmp0, i8 0, i64 16384, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %x.sroa.0, ptr align 8 %tmp0, i64 16384, i1 false)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp0)
- call void @llvm.lifetime.start.p0(i64 16384, ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %tmp0)
+ call void @llvm.lifetime.start.p0(ptr %tmp2)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %tmp2, ptr align 8 %x.sroa.0, i64 16384, i1 false)
call void @bar(ptr noalias nocapture nonnull dereferenceable(16384) %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %tmp2)
- call void @llvm.lifetime.end.p0(i64 16384, ptr %x.sroa.0)
+ call void @llvm.lifetime.end.p0(ptr %tmp2)
+ call void @llvm.lifetime.end.p0(ptr %x.sroa.0)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @bar(ptr noalias nocapture readonly dereferenceable(16384)) unnamed_addr #0
diff --git a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
index ff36bf0..e1a6c3f 100644
--- a/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
+++ b/llvm/test/Transforms/MemCpyOpt/preserve-memssa.ll
@@ -78,7 +78,7 @@ define void @test5(ptr %ptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[EARLY_DATA:%.*]] = alloca [128 x i8], align 8
; CHECK-NEXT: [[TMP:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[EARLY_DATA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[EARLY_DATA]])
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[PTR:%.*]], align 8
; CHECK-NEXT: call fastcc void @decompose(ptr [[TMP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[EARLY_DATA]], ptr [[TMP]], i64 32, i1 false)
@@ -87,7 +87,7 @@ define void @test5(ptr %ptr) {
entry:
%early_data = alloca [128 x i8], align 8
%tmp = alloca %t, align 8
- call void @llvm.lifetime.start.p0(i64 32, ptr %early_data)
+ call void @llvm.lifetime.start.p0(ptr %early_data)
%0 = load i32, ptr %ptr, align 8
call fastcc void @decompose(ptr %tmp)
call void @llvm.memcpy.p0.p0.i64(ptr %early_data, ptr %tmp, i64 32, i1 false)
@@ -131,7 +131,7 @@ define void @test8(ptr noalias %src, ptr %dst) {
declare void @clobber()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #0
diff --git a/llvm/test/Transforms/MemCpyOpt/stack-move.ll b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
index 31e255b..940e30e 100644
--- a/llvm/test/Transforms/MemCpyOpt/stack-move.ll
+++ b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
@@ -9,12 +9,12 @@ declare void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) noalias nocapture writeonly
declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1 immarg)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.start.p1(i64, ptr addrspace(1) nocapture)
-declare void @llvm.lifetime.end.p1(i64, ptr addrspace(1) nocapture)
-declare void @llvm.lifetime.start.p2(i64, ptr addrspace(2) nocapture)
-declare void @llvm.lifetime.end.p2(i64, ptr addrspace(2) nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture)
+declare void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture)
+declare void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture)
+declare void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture)
declare i32 @use_nocapture(ptr nocapture)
declare i32 @use_maycapture(ptr noundef)
@@ -31,8 +31,8 @@ define void @basic_memcpy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -40,8 +40,8 @@ define void @basic_memcpy() {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -74,8 +74,8 @@ define void @basic_memmove() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -83,8 +83,8 @@ define void @basic_memmove() {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -99,8 +99,8 @@ define void @load_store() {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -108,8 +108,8 @@ define void @load_store() {
store i32 %src.val, ptr %dest
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -125,8 +125,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) {
;
%src = alloca <vscale x 4 x i32>
%dest = alloca <vscale x 4 x i32>
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store <vscale x 4 x i32> %x, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -135,8 +135,8 @@ define void @load_store_scalable(<vscale x 4 x i32> %x) {
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -151,16 +151,16 @@ define void @align_up() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -177,21 +177,21 @@ define void @remove_extra_lifetime_intrinsics() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
%3 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -230,8 +230,8 @@ define void @alias_no_mod() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
%dest.alias = getelementptr %struct.Foo, ptr %dest, i32 0, i32 0
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -240,8 +240,8 @@ define void @alias_no_mod() {
%src.alias = getelementptr %struct.Foo, ptr %src, i32 0, i32 0
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -265,16 +265,16 @@ define void @remove_scoped_noalias() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -289,16 +289,16 @@ define void @remove_alloca_metadata() {
;
%src = alloca %struct.Foo, align 4, !annotation !3
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -314,16 +314,16 @@ define void @noalias_on_lifetime() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src), !alias.scope !2
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src), !alias.scope !2
+ call void @llvm.lifetime.end.p0(ptr nocapture %src), !alias.scope !2
%2 = call i32 @use_nocapture(ptr nocapture %dest), !noalias !2
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest), !noalias !2
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest), !noalias !2
ret void
}
@@ -338,16 +338,16 @@ define void @src_ref_dest_ref_after_copy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_readonly(ptr nocapture %src)
%2 = call i32 @use_readonly(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -362,16 +362,16 @@ define void @src_mod_dest_mod_after_copy() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_writeonly(ptr nocapture %src)
%2 = call i32 @use_writeonly(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -384,10 +384,10 @@ define void @avoid_memory_use_last_user_crash() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%v = load i32, ptr %dest
ret void
}
@@ -409,14 +409,14 @@ define void @terminator_lastuse() personality i32 0 {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr %src)
+ call void @llvm.lifetime.end.p0(ptr %src)
%rv = invoke i32 @use_nocapture(ptr %dest)
to label %suc unwind label %unw
unw:
@@ -441,8 +441,8 @@ define void @multi_bb_memcpy(i1 %b) {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
br label %bb0
@@ -453,8 +453,8 @@ bb0:
bb1:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -471,8 +471,8 @@ define void @multi_bb_load_store(i1 %b) {
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -482,8 +482,8 @@ define void @multi_bb_load_store(i1 %b) {
bb0:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -494,8 +494,8 @@ define void @multi_bb_separated_load_store(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[SRC_VAL:%.*]] = load i32, ptr [[SRC]], align 4
@@ -505,14 +505,14 @@ define void @multi_bb_separated_load_store(i1 %b) {
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -525,8 +525,8 @@ bb0:
bb1:
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -548,8 +548,8 @@ define void @multi_bb_simple_br(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -564,8 +564,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -598,7 +598,7 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -620,7 +620,7 @@ define void @multi_bb_dom_test1(i1 %b) {
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 40, i32 50, i32 60 }, ptr [[SRC]], align 4
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: ret void
@@ -641,7 +641,7 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -671,7 +671,7 @@ define void @multi_bb_pdom_test0(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
br i1 %b, label %bb0, label %bb1
@@ -686,7 +686,7 @@ bb1:
bb2:
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
uselistorder ptr %dest, { 2, 3, 0, 1, 4, 5 }
@@ -711,7 +711,7 @@ define void @multi_bb_pdom_test1(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
br i1 %b, label %bb0, label %bb1
@@ -747,7 +747,7 @@ define void @multi_bb_pdom_test2(i1 %b) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false); 1
%1 = call i32 @use_nocapture(ptr noundef nocapture %dest)
@@ -784,8 +784,8 @@ entry:
%nlt1 = icmp slt i32 %n, 1
%src = alloca %struct.Foo, align 8
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src
br i1 %nlt1, label %loop_exit, label %loop_body
@@ -816,8 +816,8 @@ define void @multi_bb_unreachable_modref(i1 %b0) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
br i1 %b0, label %bb0, label %exit
@@ -828,8 +828,8 @@ exit:
bb0:
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -850,8 +850,8 @@ define void @multi_bb_non_dominated(i1 %b0, i1 %b1) {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
br i1 %b0, label %bb0, label %bb1
@@ -865,8 +865,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -878,30 +878,30 @@ define void @memcpy_is_def() {
; CHECK-LABEL: define void @memcpy_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[SRC]], ptr align 4 [[DEST]], i64 12, i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %src, ptr align 4 %dest, i64 12, i1 false)
%3 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -912,30 +912,30 @@ define void @memset_is_def() {
; CHECK-LABEL: define void @memset_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[SRC]], i8 42, i64 12, i1 false)
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
call void @llvm.memset.p0.i64(ptr align 4 %src, i8 42, i64 12, i1 false)
%3 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -946,8 +946,8 @@ define void @store_is_def() {
; CHECK-LABEL: define void @store_is_def() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store i32 42, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4
@@ -955,14 +955,14 @@ define void @store_is_def() {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: store i32 64, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i32, align 4
%dest = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store i32 42, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
%2 = load i32, ptr %src
@@ -970,8 +970,8 @@ define void @store_is_def() {
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
store i32 64, ptr %src
%4 = call i32 @use_nocapture(ptr noundef nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 4, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -982,8 +982,8 @@ define void @multi_bb_dataflow(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
@@ -995,14 +995,14 @@ define void @multi_bb_dataflow(i1 %b) {
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -1017,8 +1017,8 @@ bb1:
br label %bb2
bb2:
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1031,26 +1031,26 @@ define void @incomplete_memcpy() {
; CHECK-LABEL: define void @incomplete_memcpy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 11, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 11, i1 false)
%2 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1060,28 +1060,28 @@ define void @incomplete_store() {
; CHECK-LABEL: define void @incomplete_store() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 [[TMP2]], ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
%2 = load i32, ptr %src
store i32 %2, ptr %dest
%3 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1091,28 +1091,28 @@ define void @dynamically_sized_alloca(i64 %i) {
; CHECK-SAME: (i64 [[I:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 [[I]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 [[I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 %i, align 4
%dest = alloca i8, i64 %i, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 -1, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
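; Dynamically sized allocas previously used the sentinel i64 -1 ("unknown
; size"); the pointer-only form subsumes that case, so the sentinel vanishes:
;   before: call void @llvm.lifetime.start.p0(i64 -1, ptr nocapture %src)
;   after:  call void @llvm.lifetime.start.p0(ptr nocapture %src)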
@@ -1122,28 +1122,28 @@ define void @inalloca() {
; CHECK-LABEL: define void @inalloca() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca inalloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca inalloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1153,28 +1153,28 @@ define void @dynamically_sized_memcpy(i64 %size) {
; CHECK-SAME: (i64 [[SIZE:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 [[SIZE]], i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 %size, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
ret void
}
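; Only the lifetime intrinsics change shape in this test: memcpy keeps its
; explicit i64 length operand, including the runtime %size used above.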
@@ -1183,28 +1183,28 @@ define void @mismatched_alloca_size() {
; CHECK-LABEL: define void @mismatched_alloca_size() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 24, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 24, align 4
%dest = alloca i8, i64 12, align 4
- call void @llvm.lifetime.start.p0(i64 24, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 24, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1213,28 +1213,28 @@ define void @mismatched_alloca_addrspace() {
; CHECK-LABEL: define void @mismatched_alloca_addrspace() {
; CHECK-NEXT: [[SRC:%.*]] = alloca i8, i64 24, align 4, addrspace(1)
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i64 12, align 4, addrspace(2)
-; CHECK-NEXT: call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p1(ptr addrspace(1) captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p2(ptr addrspace(2) captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO:%.*]] { i32 10, i32 20, i32 30 }, ptr addrspace(1) [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr addrspace(1) captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 [[DEST]], ptr addrspace(1) align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p1(ptr addrspace(1) captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr addrspace(2) captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p2(ptr addrspace(2) captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca i8, i64 24, align 4, addrspace(1)
%dest = alloca i8, i64 12, align 4, addrspace(2)
- call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) nocapture %src)
- call void @llvm.lifetime.start.p2(i64 12, ptr addrspace(2) nocapture %dest)
+ call void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture %src)
+ call void @llvm.lifetime.start.p2(ptr addrspace(2) nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr addrspace(1) %src
%1 = call i32 @use_nocapture(ptr addrspace(1) nocapture %src)
call void @llvm.memcpy.p2.p1.i64(ptr addrspace(2) align 4 %dest, ptr addrspace(1) align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p1(i64 24, ptr addrspace(1) nocapture %src)
+ call void @llvm.lifetime.end.p1(ptr addrspace(1) nocapture %src)
%2 = call i32 @use_nocapture(ptr addrspace(2) nocapture %dest)
- call void @llvm.lifetime.end.p2(i64 12, ptr addrspace(2) nocapture %dest)
+ call void @llvm.lifetime.end.p2(ptr addrspace(2) nocapture %dest)
ret void
}
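; The intrinsic stays overloaded by address space (.p1, .p2, ...); the rewrite
; drops only the size operand and keeps the addrspace-qualified pointer:
;   before: call void @llvm.lifetime.start.p1(i64 24, ptr addrspace(1) nocapture %src)
;   after:  call void @llvm.lifetime.start.p1(ptr addrspace(1) nocapture %src)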
@@ -1243,28 +1243,28 @@ define void @volatile_memcpy() {
; CHECK-LABEL: define void @volatile_memcpy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 true)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1273,28 +1273,28 @@ define void @dest_captured() {
; CHECK-LABEL: define void @dest_captured() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_maycapture(ptr [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_maycapture(ptr %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1303,28 +1303,28 @@ define void @src_captured() {
; CHECK-LABEL: define void @src_captured() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_maycapture(ptr [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_maycapture(ptr %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1334,30 +1334,30 @@ define void @mod_ref_before_copy() {
; CHECK-LABEL: define void @mod_ref_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[R:%.*]] = call i32 @use_readonly(ptr captures(none) [[DEST]])
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%r = call i32 @use_readonly(ptr nocapture %dest)
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1366,30 +1366,30 @@ define void @mod_dest_before_copy() {
; CHECK-LABEL: define void @mod_dest_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
store i32 13, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1397,22 +1397,22 @@ define void @mod_src_before_store_after_load() {
; CHECK-LABEL: define void @mod_src_before_store_after_load() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: store i32 13, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
store i32 13, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -1421,9 +1421,9 @@ define void @mod_src_before_store_after_load() {
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src
store %struct.Foo %src.val, ptr %dest
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1433,28 +1433,28 @@ define void @src_mod_dest_ref_after_copy() {
; CHECK-LABEL: define void @src_mod_dest_ref_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1464,28 +1464,28 @@ define void @src_ref_dest_mod_after_copy() {
; CHECK-LABEL: define void @src_ref_dest_mod_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 13, i32 13, i32 13 }, ptr [[DEST]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
store %struct.Foo { i32 13, i32 13, i32 13 }, ptr %dest
%1 = call i32 @use_nocapture(ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1494,22 +1494,22 @@ define void @dest_alias_mod_before_copy() {
; CHECK-LABEL: define void @dest_alias_mod_before_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1
; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1
store i32 13, ptr %dest.alias
@@ -1518,8 +1518,8 @@ define void @dest_alias_mod_before_copy() {
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1528,22 +1528,22 @@ define void @alias_src_ref_dest_mod_after_copy() {
; CHECK-LABEL: define void @alias_src_ref_dest_mod_after_copy() {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
; CHECK-NEXT: [[DEST_ALIAS:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[DEST]], i64 0, i32 1
; CHECK-NEXT: store i32 13, ptr [[DEST_ALIAS]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use_nocapture(ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
@@ -1552,8 +1552,8 @@ define void @alias_src_ref_dest_mod_after_copy() {
%dest.alias = getelementptr inbounds %struct.Foo, ptr %dest, i64 0, i32 1
store i32 13, ptr %dest.alias
%2 = call i32 @use_nocapture(ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1564,8 +1564,8 @@ define void @multi_bb_dataflow_conflict(i1 %b) {
; CHECK-SAME: (i1 [[B:%.*]]) {
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[SRC]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DEST]], ptr align 4 [[SRC]], i64 12, i1 false)
@@ -1578,14 +1578,14 @@ define void @multi_bb_dataflow_conflict(i1 %b) {
; CHECK-NEXT: br label [[BB2]]
; CHECK: bb2:
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @use_nocapture(ptr noundef captures(none) [[DEST]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: ret void
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr noundef nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
@@ -1601,8 +1601,8 @@ bb1:
bb2:
%4 = call i32 @use_nocapture(ptr noundef nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
@@ -1614,8 +1614,8 @@ define void @multi_bb_loop_dest_mod_before_copy(i32 %n) {
; CHECK-NEXT: [[NLT1:%.*]] = icmp slt i32 [[N]], 1
; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
; CHECK-NEXT: [[DEST:%.*]] = alloca [[STRUCT_FOO]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[SRC]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr captures(none) [[DEST]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[SRC]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr captures(none) [[DEST]])
; CHECK-NEXT: store [[STRUCT_FOO]] { i32 0, i32 1, i32 42 }, ptr [[SRC]], align 4
; CHECK-NEXT: br i1 [[NLT1]], label [[LOOP_EXIT:%.*]], label [[LOOP_BODY:%.*]]
; CHECK: loop_body:
@@ -1632,8 +1632,8 @@ entry:
%nlt1 = icmp slt i32 %n, 1
%src = alloca %struct.Foo, align 8
%dest = alloca %struct.Foo, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 0, i32 1, i32 42 }, ptr %src
br i1 %nlt1, label %loop_exit, label %loop_body
@@ -1660,17 +1660,17 @@ define void @partial_lifetime() {
;
%src = alloca %struct.Foo, align 4
%dest = alloca %struct.Foo, align 4
- call void @llvm.lifetime.start.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.start.p0(i64 3, ptr nocapture %dest)
+ call void @llvm.lifetime.start.p0(ptr nocapture %src)
+ call void @llvm.lifetime.start.p0(ptr nocapture %dest)
store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
%1 = call i32 @use_nocapture(ptr nocapture %src)
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %src, i64 12, i1 false)
- call void @llvm.lifetime.end.p0(i64 3, ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
%2 = call i32 @use_nocapture(ptr nocapture %dest)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %src)
- call void @llvm.lifetime.end.p0(i64 12, ptr nocapture %dest)
+ call void @llvm.lifetime.end.p0(ptr nocapture %src)
+ call void @llvm.lifetime.end.p0(ptr nocapture %dest)
ret void
}
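; The partial extents above (i64 3 against the 12-byte %struct.Foo) have no
; pointer-only equivalent; after the rewrite a marker presumably always covers
; the whole alloca, its size taken from the allocation itself.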
diff --git a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
index 179d6e6..e2f5007 100644
--- a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
@@ -35,10 +35,10 @@ if.end5: ; preds = %if.then, %entry
ret i1 %rez.0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/MoveAutoInit/clobber.ll b/llvm/test/Transforms/MoveAutoInit/clobber.ll
index 08ffb13..f52034d 100644
--- a/llvm/test/Transforms/MoveAutoInit/clobber.ll
+++ b/llvm/test/Transforms/MoveAutoInit/clobber.ll
@@ -10,9 +10,9 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = alloca [100 x i8], align 16
; CHECK-NEXT: [[TMP5:%.*]] = alloca [2 x i8], align 1
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [100 x i8], ptr [[TMP4]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 0
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP5]]) #[[ATTR3]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x i8], ptr [[TMP5]], i64 0, i64 1
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP1:%.*]], 0
; CHECK-NEXT: br i1 [[TMP9]], label [[TMP15:%.*]], label [[TMP10:%.*]]
@@ -38,19 +38,19 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
; CHECK-NEXT: br label [[TMP22]]
; CHECK: 22:
; CHECK-NEXT: [[TMP23:%.*]] = phi i32 [ [[TMP14]], [[TMP10]] ], [ [[TMP21]], [[TMP17]] ], [ 0, [[TMP15]] ]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr nonnull [[TMP5]]) #[[ATTR3]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 100, ptr nonnull [[TMP4]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP5]]) #[[ATTR3]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP4]]) #[[ATTR3]]
; CHECK-NEXT: ret i32 [[TMP23]]
;
%4 = alloca [100 x i8], align 16
%5 = alloca [2 x i8], align 1
%6 = getelementptr inbounds [100 x i8], ptr %4, i64 0, i64 0
- call void @llvm.lifetime.start.p0(i64 100, ptr nonnull %4) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %4) #3
; This memset must move.
call void @llvm.memset.p0.i64(ptr noundef nonnull align 16 dereferenceable(100) %6, i8 -86, i64 100, i1 false), !annotation !0
%7 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 0
- call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %5) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %5) #3
; This store must move.
store i8 -86, ptr %7, align 1, !annotation !0
%8 = getelementptr inbounds [2 x i8], ptr %5, i64 0, i64 1
@@ -81,16 +81,16 @@ define i32 @foo(i32 noundef %0, i32 noundef %1, i32 noundef %2) #0 {
22:
%23 = phi i32 [ %14, %10 ], [ %21, %17 ], [ 0, %15 ]
- call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %5) #3
- call void @llvm.lifetime.end.p0(i64 100, ptr nonnull %4) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %5) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %4) #3
ret i32 %23
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { mustprogress nofree nosync nounwind readnone uwtable willreturn }
attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
index dfd6d7d..979aa69 100644
--- a/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
+++ b/llvm/test/Transforms/NewGVN/coercion-different-ptr.ll
@@ -11,7 +11,7 @@ define void @foo(ptr %arg) {
; CHECK-SAME: ptr [[ARG:%.*]]) {
; CHECK-NEXT: [[BB:.*:]]
; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i8, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[ALLOCA]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[ALLOCA]])
; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG]], align 8
; CHECK-NEXT: [[LOAD1:%.*]] = load ptr, ptr [[LOAD]], align 8
; CHECK-NEXT: [[CALL:%.*]] = call ptr [[LOAD1]](ptr [[ALLOCA]])
@@ -19,14 +19,14 @@ define void @foo(ptr %arg) {
;
bb:
%alloca = alloca i8, align 16
- call void @llvm.lifetime.start.p0(i64 1, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
%load = load ptr, ptr %arg, align 8
%load1 = load ptr, ptr %load, align 8
%call = call ptr %load1(ptr %alloca)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #0
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #0
declare ptr @malloc(i64)
diff --git a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
index 017f608..8b2d662 100644
--- a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
+++ b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
@@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv) #1
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -87,7 +87,7 @@ if.then.i.i.i20: ; preds = %invoke.cont3
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(i64 64, ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv) #1
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -106,14 +106,14 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @__gxx_personality_v0(...)
declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
diff --git a/llvm/test/Transforms/NewGVN/lifetime-simple.ll b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
index 0a7bd33..7fe6649 100644
--- a/llvm/test/Transforms/NewGVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/NewGVN/lifetime-simple.ll
@@ -9,21 +9,21 @@ define i8 @test() nounwind {
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P:%.*]] = alloca [32 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[P]])
; CHECK-NEXT: store i8 1, ptr [[P]], align 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[P]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[P]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[TMP0]]
;
entry:
%P = alloca [32 x i8]
- call void @llvm.lifetime.start.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.start.p0(ptr %P)
%0 = load i8, ptr %P
store i8 1, ptr %P
- call void @llvm.lifetime.end.p0(i64 32, ptr %P)
+ call void @llvm.lifetime.end.p0(ptr %P)
%1 = load i8, ptr %P
ret i8 %1
}
-declare void @llvm.lifetime.start.p0(i64 %S, ptr nocapture %P) readonly
-declare void @llvm.lifetime.end.p0(i64 %S, ptr nocapture %P)
+declare void @llvm.lifetime.start.p0(ptr nocapture %P) readonly
+declare void @llvm.lifetime.end.p0(ptr nocapture %P)
diff --git a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
index a19a2a6..15bb1cb 100644
--- a/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
+++ b/llvm/test/Transforms/NewGVN/verify-memoryphi.ll
@@ -5,7 +5,7 @@
; REQUIRES: asserts
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define void @tinkywinky() {
; CHECK-LABEL: define void @tinkywinky() {
@@ -20,11 +20,11 @@ define void @tinkywinky() {
;
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br i1 false, label %body, label %end
body:
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
br label %end
end:
diff --git a/llvm/test/Transforms/NewGVN/vscale.ll b/llvm/test/Transforms/NewGVN/vscale.ll
index 7021172..64e22e1 100644
--- a/llvm/test/Transforms/NewGVN/vscale.ll
+++ b/llvm/test/Transforms/NewGVN/vscale.ll
@@ -579,7 +579,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-LABEL: @bigexample(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[REF_TMP:%.*]] = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[REF_TMP]])
; CHECK-NEXT: [[A_ELT:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[A:%.*]], 0
; CHECK-NEXT: store <vscale x 4 x i32> [[A_ELT]], ptr [[REF_TMP]], align 16
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
@@ -603,12 +603,12 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-NEXT: [[TMP12:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP9]], <vscale x 16 x i8> [[DOTUNPACK10]], 2
; CHECK-NEXT: [[DOTUNPACK12:%.*]] = load <vscale x 16 x i8>, ptr [[REF_TMP_REPACK5]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP12]], <vscale x 16 x i8> [[DOTUNPACK12]], 3
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull [[REF_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[REF_TMP]])
; CHECK-NEXT: ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP15]]
;
entry:
%ref.tmp = alloca { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ref.tmp)
%a.elt = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %a, 0
store <vscale x 4 x i32> %a.elt, ptr %ref.tmp, align 16
%0 = call i64 @llvm.vscale.i64()
@@ -643,7 +643,7 @@ entry:
%.elt11 = getelementptr inbounds i8, ptr %ref.tmp, i64 %14
%.unpack12 = load <vscale x 16 x i8>, ptr %.elt11, align 16
%15 = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %12, <vscale x 16 x i8> %.unpack12, 3
- call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %ref.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ref.tmp)
ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %15
}
diff --git a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
index 180fd0a..694deb3 100644
--- a/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
+++ b/llvm/test/Transforms/ObjCARC/inlined-autorelease-return-value.ll
@@ -7,8 +7,8 @@ declare ptr @llvm.objc.autoreleaseReturnValue(ptr)
declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr)
declare ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr)
declare void @opaque()
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
; CHECK-LABEL: define ptr @elide_with_retainRV(
; CHECK-NEXT: entry:
@@ -81,16 +81,16 @@ entry:
; CHECK-LABEL: define ptr @elide_with_retainRV_splitByLifetime(
; CHECK-NEXT: entry:
; CHECK-NEXT: %x = alloca ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: ret ptr %x
define ptr @elide_with_retainRV_splitByLifetime() nounwind {
entry:
; Cleanup should skip over lifetime intrinsics.
%x = alloca ptr
- call void @llvm.lifetime.start(i64 8, ptr %x)
+ call void @llvm.lifetime.start(ptr %x)
%b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind
- call void @llvm.lifetime.end(i64 8, ptr %x)
+ call void @llvm.lifetime.end(ptr %x)
%d = call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %b) nounwind
ret ptr %d
}
@@ -221,17 +221,17 @@ entry:
; CHECK-LABEL: define ptr @elide_with_claimRV_splitByLifetime(
; CHECK-NEXT: entry:
; CHECK-NEXT: %x = alloca ptr
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %x)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %x)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %x)
; CHECK-NEXT: tail call void @llvm.objc.release(ptr %x)
; CHECK-NEXT: ret ptr %x
define ptr @elide_with_claimRV_splitByLifetime() nounwind {
entry:
; Cleanup should skip over lifetime intrinsics.
%x = alloca ptr
- call void @llvm.lifetime.start(i64 8, ptr %x)
+ call void @llvm.lifetime.start(ptr %x)
%b = call ptr @llvm.objc.autoreleaseReturnValue(ptr %x) nounwind
- call void @llvm.lifetime.end(i64 8, ptr %x)
+ call void @llvm.lifetime.end(ptr %x)
%d = call ptr @llvm.objc.unsafeClaimAutoreleasedReturnValue(ptr %b) nounwind
ret ptr %d
}
diff --git a/llvm/test/Transforms/ObjCARC/post-inlining.ll b/llvm/test/Transforms/ObjCARC/post-inlining.ll
index c15e089..b184bea 100644
--- a/llvm/test/Transforms/ObjCARC/post-inlining.ll
+++ b/llvm/test/Transforms/ObjCARC/post-inlining.ll
@@ -65,22 +65,22 @@ entry:
; 2) Lifetime markers.
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
; CHECK-LABEL: define ptr @testLifetime(
; CHECK: entry:
; CHECK-NEXT: %obj = alloca i8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr %obj)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr %obj)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr %obj)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr %obj)
; CHECK-NEXT: ret ptr %call.i
; CHECK-NEXT: }
define ptr @testLifetime(ptr %call.i) {
entry:
%obj = alloca i8
- call void @llvm.lifetime.start.p0(i64 8, ptr %obj)
+ call void @llvm.lifetime.start.p0(ptr %obj)
%0 = tail call ptr @llvm.objc.autoreleaseReturnValue(ptr %call.i) nounwind
- call void @llvm.lifetime.end.p0(i64 8, ptr %obj)
+ call void @llvm.lifetime.end.p0(ptr %obj)
%1 = tail call ptr @llvm.objc.retainAutoreleasedReturnValue(ptr %call.i) nounwind
ret ptr %call.i
}
diff --git a/llvm/test/Transforms/ObjCARC/related-check.ll b/llvm/test/Transforms/ObjCARC/related-check.ll
index 7c56b2d..045c001 100644
--- a/llvm/test/Transforms/ObjCARC/related-check.ll
+++ b/llvm/test/Transforms/ObjCARC/related-check.ll
@@ -52,9 +52,9 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
for.body: ; preds = %for.body.lr.ph, %if.end19
%i.032 = phi i32 [ 1, %for.body.lr.ph ], [ %inc, %if.end19 ]
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %persistent) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %persistent) #4
store i32 0, ptr %persistent, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %personalized) #4
+ call void @llvm.lifetime.start.p0(ptr nonnull %personalized) #4
store i32 0, ptr %personalized, align 4
%call = call zeroext i1 @lookupType(ptr noundef nonnull %persistent, ptr noundef nonnull %personalized) #8, !clang.arc.no_objc_arc_exceptions !15
br i1 %call, label %if.then, label %if.end19
@@ -110,18 +110,18 @@ if.end18: ; preds = %if.else, %if.then13
br label %if.end19
if.end19: ; preds = %if.end18, %for.body
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %personalized) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %persistent) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %personalized) #4
+ call void @llvm.lifetime.end.p0(ptr nonnull %persistent) #4
%inc = add nuw nsw i32 %i.032, 1
%exitcond.not = icmp eq i32 %inc, %argc
br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
}
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: inaccessiblememonly mustprogress nocallback nofree nosync nounwind willreturn
declare void @llvm.objc.clang.arc.noop.use(...) #5
diff --git a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
index ad41639..60969ec 100644
--- a/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/custom_state_machines_remarks.ll
@@ -72,10 +72,10 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3
call void @unknown() #6, !dbg !20
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
call void @unknown() #6, !dbg !27
call void @__kmpc_target_deinit() #3, !dbg !28
br label %common.ret
@@ -116,18 +116,18 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
call void @no_openmp()
call void @no_parallelism()
call void @__kmpc_target_deinit() #3, !dbg !46
@@ -155,10 +155,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
declare void @no_openmp() #7
declare void @no_parallelism() #8
diff --git a/llvm/test/Transforms/OpenMP/nested_parallelism.ll b/llvm/test/Transforms/OpenMP/nested_parallelism.ll
index 412e5ea..5d96465 100644
--- a/llvm/test/Transforms/OpenMP/nested_parallelism.ll
+++ b/llvm/test/Transforms/OpenMP/nested_parallelism.ll
@@ -52,7 +52,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13
; CHECK: common.ret:
; CHECK-NEXT: ret void
; CHECK: user_code.entry:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #[[ATTR2]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 0
@@ -66,7 +66,7 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l13
; CHECK-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5)
; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr addrspace(5) [[TMP4]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: call void @__kmpc_target_deinit()
; CHECK-NEXT: br label [[COMMON_RET]]
;
@@ -80,7 +80,7 @@ common.ret: ; preds = %entry, %_Z3fooi.int
ret void
user_code.entry: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%2 = tail call i32 @__kmpc_get_hardware_thread_id_in_block() #6
%3 = icmp eq i32 %2, 0
@@ -95,7 +95,7 @@ _Z3fooi.internalized.exit: ; preds = %user_code.entry, %r
tail call void @__kmpc_barrier_simple_spmd(ptr nonnull @1, i32 %2)
store ptr addrspacecast (ptr addrspace(3) @i_shared to ptr), ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__, ptr nonnull @__omp_outlined___wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
call void @__kmpc_target_deinit() #6
br label %common.ret
}
@@ -139,13 +139,13 @@ define weak_odr protected ptx_kernel void @__omp_offloading_10302_bd7e0_main_l16
; CHECK-NEXT: ret void
; CHECK: user_code.entry:
; CHECK-NEXT: [[I_ADDR_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[I:%.*]] to i32
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: store i32 [[I_ADDR_SROA_0_0_EXTRACT_TRUNC]], ptr addrspace(3) @i.i_shared, align 16
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[CAPTURED_VARS_ADDRS_I]] to ptr addrspace(5)
; CHECK-NEXT: store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr addrspace(5) [[TMP2]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: call void @__kmpc_target_deinit()
; CHECK-NEXT: br label [[COMMON_RET]]
;
@@ -160,12 +160,12 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%i.addr.sroa.0.0.extract.trunc = trunc i64 %i to i32
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
store i32 %i.addr.sroa.0.0.extract.trunc, ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), align 16
store ptr addrspacecast (ptr addrspace(3) @i.i_shared to ptr), ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
call void @__kmpc_target_deinit() #6
br label %common.ret
}
@@ -201,7 +201,7 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CAPTURED_VARS_ADDRS_I:%.*]] = alloca [1 x ptr], align 8
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: [[I_I:%.*]] = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]]
; CHECK-NEXT: store i32 [[TMP0]], ptr [[I_I]], align 16
@@ -209,20 +209,20 @@ define internal void @__omp_outlined__(ptr noalias nocapture readnone %.global_t
; CHECK-NEXT: store ptr [[I_I]], ptr addrspace(5) [[TMP2]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I]], i64 1)
; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I]], i64 4) #[[ATTR2]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I]])
; CHECK-NEXT: ret void
;
entry:
%captured_vars_addrs.i = alloca [1 x ptr], align 8
%0 = load i32, ptr %i, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i)
%1 = tail call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%i.i = tail call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6
store i32 %0, ptr %i.i, align 16
store ptr %i.i, ptr %captured_vars_addrs.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %1, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i, i64 1) #6
call void @__kmpc_free_shared(ptr %i.i, i64 4) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i)
ret void
}
@@ -236,7 +236,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 {
; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr addrspace(5) [[TMP5]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_global_thread_num(ptr nonnull @[[GLOB1]]) #[[ATTR2]]
; CHECK-NEXT: [[I_I_I:%.*]] = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #[[ATTR2]]
; CHECK-NEXT: store i32 [[TMP4]], ptr [[I_I_I]], align 16
@@ -244,7 +244,7 @@ define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 %1) #5 {
; CHECK-NEXT: store ptr [[I_I_I]], ptr addrspace(5) [[TMP7]], align 8
; CHECK-NEXT: call void @__kmpc_parallel_51(ptr nonnull @[[GLOB1]], i32 [[TMP6]], i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]], i64 1)
; CHECK-NEXT: call void @__kmpc_free_shared(ptr [[I_I_I]], i64 4) #[[ATTR2]]
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[CAPTURED_VARS_ADDRS_I_I]])
; CHECK-NEXT: ret void
;
entry:
@@ -254,14 +254,14 @@ entry:
%2 = load ptr, ptr %global_args, align 8
%3 = load ptr, ptr %2, align 8
%4 = load i32, ptr %3, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i)
%5 = call i32 @__kmpc_global_thread_num(ptr nonnull @1) #6
%i.i.i = call align 16 dereferenceable_or_null(4) ptr @__kmpc_alloc_shared(i64 4) #6
store i32 %4, ptr %i.i.i, align 16
store ptr %i.i.i, ptr %captured_vars_addrs.i.i, align 8
call void @__kmpc_parallel_51(ptr nonnull @1, i32 %5, i32 1, i32 -1, i32 -1, ptr nonnull @__omp_outlined__1, ptr nonnull @__omp_outlined__1_wrapper, ptr nonnull %captured_vars_addrs.i.i, i64 1) #6
call void @__kmpc_free_shared(ptr %i.i.i, i64 4) #6
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %captured_vars_addrs.i.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i)
ret void
}
@@ -316,9 +316,9 @@ declare i32 @__kmpc_get_hardware_thread_id_in_block() local_unnamed_addr
declare void @__kmpc_barrier_simple_spmd(ptr, i32) local_unnamed_addr #10
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #11
+declare void @llvm.lifetime.start.p0(ptr nocapture) #11
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #11
+declare void @llvm.lifetime.end.p0(ptr nocapture) #11
!omp_offload.info = !{!0, !1}
diff --git a/llvm/test/Transforms/OpenMP/parallel_deletion.ll b/llvm/test/Transforms/OpenMP/parallel_deletion.ll
index 3e16d96..67970c4 100644
--- a/llvm/test/Transforms/OpenMP/parallel_deletion.ll
+++ b/llvm/test/Transforms/OpenMP/parallel_deletion.ll
@@ -282,46 +282,46 @@ define void @delete_parallel_2() {
; CHECK-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR18:[0-9]+]]
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..3, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..4, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..5, ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef nonnull @.omp_outlined..6, ptr noundef nonnull align 4 captures(none) dereferenceable(4) [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK-NEXT: ret void
;
; CHECK1-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK1-NEXT: entry:
; CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
+; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
; CHECK1-NEXT: store i32 0, ptr [[A]], align 4
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]])
-; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK1-NEXT: ret void
; CHECK2-LABEL: define {{[^@]+}}@delete_parallel_2() {
; CHECK2-NEXT: entry:
; CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 dereferenceable(4) [[A]]) #[[ATTR0]]
; CHECK2-NEXT: store i32 0, ptr [[A]], align 4
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..3, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..4, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..5, ptr nocapture nofree noundef nonnull align 4 dereferenceable(4) [[A]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr noundef nonnull align 8 dereferenceable(24) @[[GLOB0]], i32 noundef 1, ptr noundef @.omp_outlined..6, ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A]])
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A]])
; CHECK2-NEXT: ret void
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
store i32 0, ptr %a, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..3, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..4, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..5, ptr nonnull %a)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @0, i32 1, ptr @.omp_outlined..6, ptr nonnull %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
ret void
}
@@ -445,7 +445,7 @@ omp_if.end: ; preds = %entry, %omp_if.then
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @omp_get_thread_num() inaccessiblememonly nofree nosync nounwind readonly
@@ -531,7 +531,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]]
; CHECK-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -552,7 +552,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK: .omp.reduction.default:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK-NEXT: ret void
;
; CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6
@@ -560,7 +560,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK1-NEXT: entry:
; CHECK1-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
+; CHECK1-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
; CHECK1-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK1-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -581,14 +581,14 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK1-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK1: .omp.reduction.default:
-; CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK1-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK1-NEXT: ret void
; CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6
; CHECK2-SAME: (ptr noalias nocapture nofree noundef nonnull readonly align 4 dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], ptr noalias nocapture nofree readnone [[DOTBOUND_TID_:%.*]], ptr nocapture noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) {
; CHECK2-NEXT: entry:
; CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
; CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR0]]
; CHECK2-NEXT: store i32 1, ptr [[A1]], align 4
; CHECK2-NEXT: store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8
; CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
@@ -609,12 +609,12 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.
; CHECK2-NEXT: [[TMP8:%.*]] = atomicrmw add ptr [[A]], i32 [[TMP7]] monotonic, align 4
; CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
; CHECK2: .omp.reduction.default:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[A1]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[A1]])
; CHECK2-NEXT: ret void
entry:
%a1 = alloca i32, align 4
%.omp.reduction.red_list = alloca [1 x ptr], align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a1)
store i32 1, ptr %a1, align 4
store ptr %a1, ptr %.omp.reduction.red_list, align 8
%tmp2 = load i32, ptr %.global_tid., align 4
@@ -638,7 +638,7 @@ entry:
br label %.omp.reduction.default
.omp.reduction.default: ; preds = %.omp.reduction.case2, %.omp.reduction.case1, %entry
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a1)
ret void
}
@@ -696,7 +696,7 @@ declare i32 @__kmpc_reduce_nowait(ptr, i32, i32, i64, ptr, ptr, ptr)
declare void @__kmpc_end_reduce_nowait(ptr, i32, ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare !callback !2 void @__kmpc_fork_call(ptr, i32, ptr, ...)
diff --git a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
index d587b9a..83452e7 100644
--- a/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
+++ b/llvm/test/Transforms/OpenMP/parallel_region_merging.ll
@@ -433,11 +433,11 @@ entry:
%b = alloca i32, align 4
store i32 %a, ptr %a.addr, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..14, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
%0 = ptrtoint ptr %b to i64
%1 = trunc i64 %0 to i32
store i32 %1, ptr %b, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..15, ptr nonnull %a.addr)
ret void
}
@@ -449,9 +449,9 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define internal void @.omp_outlined..15(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid., ptr nocapture nonnull readonly align 4 dereferenceable(4) %a) {
entry:
@@ -466,12 +466,12 @@ entry:
%b = alloca i32, align 4
store i32 %a, ptr %a.addr, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 1, ptr @.omp_outlined..16, ptr nonnull %a.addr)
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.start.p0(ptr nonnull %b)
%0 = load i32, ptr %a.addr, align 4
%add = add nsw i32 %0, 1
store i32 %add, ptr %b, align 4
call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 2, ptr @.omp_outlined..17, ptr nonnull %a.addr, ptr nonnull %b)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %b)
+ call void @llvm.lifetime.end.p0(ptr nonnull %b)
ret void
}
@@ -1184,11 +1184,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -1216,7 +1216,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -1224,7 +1224,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -2155,11 +2155,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -2187,7 +2187,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -2195,7 +2195,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -3126,11 +3126,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -3158,7 +3158,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -3166,7 +3166,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -4097,11 +4097,11 @@ entry:
; CHECK: omp_region.body:
; CHECK-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK: seq.par.merged:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK: omp.par.merged.split:
; CHECK-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -4129,7 +4129,7 @@ entry:
; CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
; CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
; CHECK: omp_parallel:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @merge_seq_par_use..omp_par, ptr [[A_ADDR]], ptr [[B]])
; CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK: omp.par.outlined.exit:
@@ -4137,7 +4137,7 @@ entry:
; CHECK: omp.par.exit.split:
; CHECK-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK: entry.split.split:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK-NEXT: ret void
; CHECK-LABEL: define {{[^@]+}}@merge_seq_par_use..omp_par
; CHECK-SAME: (ptr noalias [[TID_ADDR:%.*]], ptr noalias [[ZERO_ADDR:%.*]], ptr [[A_ADDR:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
@@ -5148,11 +5148,11 @@ entry:
; CHECK2: omp_region.body:
; CHECK2-NEXT: br label [[SEQ_PAR_MERGED:%.*]]
; CHECK2: seq.par.merged:
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
; CHECK2-NEXT: store i32 [[TMP5]], ptr [[B]], align 4
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: br label [[OMP_PAR_MERGED_SPLIT:%.*]]
; CHECK2: omp.par.merged.split:
; CHECK2-NEXT: br label [[OMP_REGION_BODY_SPLIT:%.*]]
@@ -5197,13 +5197,13 @@ entry:
; CHECK2-NEXT: store ptr [[A_ADDR]], ptr [[GEP_A_ADDR]], align 8
; CHECK2-NEXT: [[GEP_B:%.*]] = getelementptr { ptr, ptr, ptr }, ptr [[STRUCTARG]], i32 0, i32 2
; CHECK2-NEXT: store ptr [[B]], ptr [[GEP_B]], align 8
-; CHECK2-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.start.p0(ptr [[B]])
; CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @merge_seq_par_use..omp_par, ptr [[STRUCTARG]])
; CHECK2-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
; CHECK2: omp.par.exit:
; CHECK2-NEXT: br label [[ENTRY_SPLIT_SPLIT:%.*]]
; CHECK2: entry.split.split:
-; CHECK2-NEXT: call void @llvm.lifetime.end.p0(i64 noundef 4, ptr noundef nonnull [[B]])
+; CHECK2-NEXT: call void @llvm.lifetime.end.p0(ptr noundef nonnull [[B]])
; CHECK2-NEXT: ret void
;
;
diff --git a/llvm/test/Transforms/OpenMP/spmdization.ll b/llvm/test/Transforms/OpenMP/spmdization.ll
index e91f160..0272c41 100644
--- a/llvm/test/Transforms/OpenMP/spmdization.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization.ll
@@ -2922,7 +2922,7 @@ declare void @unknown() #7
declare void @unknowni32p(ptr) #7
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #8
define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -2958,7 +2958,7 @@ declare void @__kmpc_get_shared_variables(ptr)
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #9
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #8
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #8
; Function Attrs: convergent
declare void @spmd_amenable() #6
diff --git a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
index 70c0d04..5a7d097 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_constant_prop.ll
@@ -61,7 +61,7 @@ common.ret: ; preds = %user_code.entry, %e
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.start.p5(ptr addrspace(5) captures(none)) #1
; Function Attrs: alwaysinline mustprogress nofree norecurse nosync nounwind willreturn memory(none)
define internal void @__omp_outlined__(ptr noalias captures(none) %.global_tid., ptr noalias captures(none) %.bound_tid., ptr nonnull align 4 captures(none) %ng, ptr nonnull align 8 captures(none) %aa) #2 {
@@ -72,7 +72,7 @@ entry:
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p5(i64 immarg, ptr addrspace(5) captures(none)) #1
+declare void @llvm.lifetime.end.p5(ptr addrspace(5) captures(none)) #1
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
define internal void @__omp_outlined___wrapper(i16 zeroext %0, i32 noundef %1) #3 {
diff --git a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
index 3c3e1d7..d1e006a 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_indirect.ll
@@ -1017,7 +1017,7 @@ declare void @unknown() #5
declare void @unknowni32p(ptr) #5
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #6
define weak i32 @__kmpc_target_init(ptr %0, ptr %1) {
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
@@ -1037,7 +1037,7 @@ declare void @__kmpc_get_shared_variables(ptr)
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #7
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #6
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #6
; Function Attrs: convergent
declare void @spmd_amenable() #4
diff --git a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
index ef36937b..f30e827 100644
--- a/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/spmdization_remarks.ll
@@ -75,10 +75,10 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @3) #3
call void @unknown() #6, !dbg !20
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i.i, i64 noundef 0) #3, !dbg !23
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i.i) #3, !dbg !26
call void @unknown() #6, !dbg !27
call void @__kmpc_target_deinit() #3, !dbg !28
br label %common.ret
@@ -119,18 +119,18 @@ common.ret: ; preds = %entry, %user_code.e
user_code.entry: ; preds = %entry
%1 = call i32 @__kmpc_global_thread_num(ptr nonnull @9) #3
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%2 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %2, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !35
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !39
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%3 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %3, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !40
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
- call void @llvm.lifetime.start.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !42
+ call void @llvm.lifetime.start.p0(ptr nonnull %captured_vars_addrs.i2.i) #3
%4 = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @13) #3
call void @__kmpc_parallel_51(ptr noundef nonnull @13, i32 %4, i32 noundef 1, i32 noundef -1, i32 noundef -1, ptr noundef @__omp_outlined__2, ptr noundef @__omp_outlined__2_wrapper, ptr noundef nonnull %captured_vars_addrs.i2.i, i64 noundef 0) #3, !dbg !43
- call void @llvm.lifetime.end.p0(i64 0, ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
+ call void @llvm.lifetime.end.p0(ptr nonnull %captured_vars_addrs.i2.i) #3, !dbg !45
call void @spmd_amenable()
call void @__kmpc_target_deinit() #3, !dbg !46
br label %common.ret
@@ -157,10 +157,10 @@ declare void @__kmpc_get_shared_variables(ptr) local_unnamed_addr
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) local_unnamed_addr
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.start.p0(ptr nocapture) #5
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #5
+declare void @llvm.lifetime.end.p0(ptr nocapture) #5
declare void @spmd_amenable() #7
diff --git a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
index a7337d0..014f95f 100644
--- a/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
+++ b/llvm/test/Transforms/PGOProfile/consecutive-zeros.ll
@@ -48,11 +48,11 @@ for.end6:
ret void
}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/entry_alloca.ll b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
index 580f055..c791e1d 100644
--- a/llvm/test/Transforms/PGOProfile/entry_alloca.ll
+++ b/llvm/test/Transforms/PGOProfile/entry_alloca.ll
@@ -19,8 +19,8 @@
define dso_local double @foo() {
%1 = alloca %struct.A, align 4
%2 = alloca %struct.B, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
- call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %2)
call void @bar(ptr noundef nonnull %1, ptr noundef nonnull %2)
%3 = load i32, ptr %1, align 4
%4 = icmp sgt i32 %3, 0
@@ -48,8 +48,8 @@ define dso_local double @foo() {
21:
%22 = phi double [ 0.000000e+00, %0 ], [ %18, %9 ]
- call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %2)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret double %22
}
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
index 3ef185a..9122454 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_annotation.ll
@@ -56,11 +56,11 @@ for.end6:
ret void
}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
diff --git a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
index c4f749b..f63989a 100644
--- a/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
+++ b/llvm/test/Transforms/PGOProfile/memop_size_opt.ll
@@ -181,14 +181,14 @@ for.end6:
!30 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
!31 = !{!"VP", i32 1, i64 556, i64 0, i64 99, i64 2, i64 88, i64 3, i64 77, i64 9, i64 72, i64 4, i64 66, i64 5, i64 55, i64 6, i64 44, i64 7, i64 33, i64 8, i64 22}
-declare void @llvm.lifetime.start(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
declare i32 @memcmp(ptr, ptr, i64)
declare i32 @bcmp(ptr, ptr, i64)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
; YAML: --- !Passed
; YAML-NEXT: Pass: pgo-memop-opt
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
index f187d41..0a1efda 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
@@ -23,10 +23,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -52,13 +52,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -70,7 +70,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
index 146ad44..68b233e 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
@@ -20,10 +20,10 @@ define i32 @bar() #0 !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -67,7 +67,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
index 18677b7..2f188f5 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
@@ -33,10 +33,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando) #4
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x) #4
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -62,13 +62,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x) #4
+ call void @llvm.lifetime.end.p0(ptr %rando) #4
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -80,7 +80,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
index 1e7e8c1..4add781 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
@@ -21,10 +21,10 @@ define i32 @bar() #0 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.start.p0(ptr %rando) #3
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !2
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
store i32 0, ptr %x, align 4, !tbaa !2
%0 = load i32, ptr %rando, align 4, !tbaa !2
%rem = srem i32 %0, 200000
@@ -49,13 +49,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !2
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #3
+ call void @llvm.lifetime.end.p0(ptr %x) #3
+ call void @llvm.lifetime.end.p0(ptr %rando) #3
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -64,7 +64,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
index 10c3718..5a7731b 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
@@ -37,10 +37,10 @@ define i32 @bar() #0 !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -66,13 +66,13 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(i64 4, ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @buzz(...) #2
@@ -84,7 +84,7 @@ declare i32 @baz(i32) #2
declare i32 @foo(i32) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
index 6e21c08..859ba72 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
@@ -40,7 +40,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local void @init_arry() #0 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.start.p0(ptr %i) #6
store i32 0, ptr %i, align 4, !tbaa !4
br label %for.cond
@@ -65,12 +65,12 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6
+ call void @llvm.lifetime.end.p0(ptr %i) #6
ret void
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -79,7 +79,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
declare dso_local i32 @rand() #3
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 {
@@ -90,9 +90,9 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry()
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.start.p0(ptr %val) #6
store i32 0, ptr %val, align 4, !tbaa !4
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6
+ call void @llvm.lifetime.start.p0(ptr %j) #6
store i32 0, ptr %j, align 4, !tbaa !4
br label %for.cond
@@ -102,7 +102,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.start.p0(ptr %condition) #6
%call = call i32 @rand() #6
%rem = srem i32 %call, 5
store i32 %rem, ptr %condition, align 4, !tbaa !4
@@ -138,7 +138,7 @@ sw.default: ; preds = %for.body
unreachable
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6
+ call void @llvm.lifetime.end.p0(ptr %condition) #6
br label %for.inc
for.inc: ; preds = %sw.epilog
@@ -148,8 +148,8 @@ for.inc: ; preds = %sw.epilog
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6
+ call void @llvm.lifetime.end.p0(ptr %j) #6
+ call void @llvm.lifetime.end.p0(ptr %val) #6
ret i32 0
}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
index ebecee1..242d5b8 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
@@ -43,7 +43,7 @@ target triple = "x86_64-unknown-linux-gnu"
define dso_local void @init_arry() #0 !dbg !21 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #6, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i) #6, !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !25, metadata !DIExpression()), !dbg !27
store i32 0, ptr %i, align 4, !dbg !28, !tbaa !30
br label %for.cond, !dbg !34
@@ -69,12 +69,12 @@ for.inc: ; preds = %for.body
br label %for.cond, !dbg !47, !llvm.loop !48
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #6, !dbg !50
+ call void @llvm.lifetime.end.p0(ptr %i) #6, !dbg !50
ret void, !dbg !50
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone speculatable willreturn
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -83,7 +83,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
declare dso_local i32 @rand() #3
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 !dbg !51 {
@@ -94,10 +94,10 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry(), !dbg !62
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #6, !dbg !63
+ call void @llvm.lifetime.start.p0(ptr %val) #6, !dbg !63
call void @llvm.dbg.declare(metadata ptr %val, metadata !55, metadata !DIExpression()), !dbg !64
store i32 0, ptr %val, align 4, !dbg !64, !tbaa !30
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #6, !dbg !65
+ call void @llvm.lifetime.start.p0(ptr %j) #6, !dbg !65
call void @llvm.dbg.declare(metadata ptr %j, metadata !56, metadata !DIExpression()), !dbg !66
store i32 0, ptr %j, align 4, !dbg !67, !tbaa !30
br label %for.cond, !dbg !68
@@ -108,7 +108,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !71
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %condition) #6, !dbg !72
+ call void @llvm.lifetime.start.p0(ptr %condition) #6, !dbg !72
call void @llvm.dbg.declare(metadata ptr %condition, metadata !57, metadata !DIExpression()), !dbg !73
%call = call i32 @rand() #6, !dbg !74
%rem = srem i32 %call, 5, !dbg !75
@@ -145,7 +145,7 @@ sw.default: ; preds = %for.body
unreachable, !dbg !87
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(i64 4, ptr %condition) #6, !dbg !88
+ call void @llvm.lifetime.end.p0(ptr %condition) #6, !dbg !88
br label %for.inc, !dbg !89
for.inc: ; preds = %sw.epilog
@@ -155,8 +155,8 @@ for.inc: ; preds = %sw.epilog
br label %for.cond, !dbg !91, !llvm.loop !92
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #6, !dbg !94
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %j) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %val) #6, !dbg !94
ret i32 0, !dbg !95
}
diff --git a/llvm/test/Transforms/PartialInlining/switch_stmt.ll b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
index 3f43369..d99fbba 100644
--- a/llvm/test/Transforms/PartialInlining/switch_stmt.ll
+++ b/llvm/test/Transforms/PartialInlining/switch_stmt.ll
@@ -62,7 +62,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
; CHECK-LABEL: @caller(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RC_I:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[RC_I]])
; CHECK-NEXT: store i32 0, ptr [[RC_I]], align 4
; CHECK-NEXT: switch i32 [[C:%.*]], label [[SW_DEFAULT_I:%.*]] [
; CHECK-NEXT: i32 0, label [[CODEREPL_I:%.*]]
@@ -83,7 +83,7 @@ define dso_local signext i32 @caller(i32 signext %c) !prof !30 {
; CHECK-NEXT: br label [[CALLEE_1_EXIT]]
; CHECK: callee.1.exit:
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[RC_I]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[RC_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[RC_I]])
;
entry:
%0 = call signext i32 @callee(i32 signext %c, i32 signext %c)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index b4b12da..141503d 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -68,9 +68,9 @@ entry:
store ptr %array, ptr %array.addr, align 8
store i32 %count, ptr %count.addr, align 4
store i32 %n, ptr %n.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i32 0, ptr %sum, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 0, ptr %i, align 4
br label %for.cond
@@ -81,7 +81,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end
for.body: ; preds = %for.cond
@@ -113,7 +113,7 @@ for.inc: ; preds = %if.end
for.end: ; preds = %for.cond.cleanup
%9 = load i32, ptr %sum, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum)
+ call void @llvm.lifetime.end.p0(ptr %sum)
ret i32 %9
}
@@ -184,9 +184,9 @@ entry:
%1 = getelementptr inbounds { ptr, i64 }, ptr %s, i32 0, i32 1
store i64 %s.coerce1, ptr %1, align 8
store i64 %n, ptr %n.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %ret) #7
+ call void @llvm.lifetime.start.p0(ptr %ret) #7
store i32 0, ptr %ret, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.start.p0(ptr %i) #7
store i64 0, ptr %i, align 8
br label %for.cond
@@ -197,7 +197,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 8, ptr %i) #7
+ call void @llvm.lifetime.end.p0(ptr %i) #7
br label %for.end
for.body: ; preds = %for.cond
@@ -217,7 +217,7 @@ for.inc: ; preds = %for.body
for.end: ; preds = %for.cond.cleanup
%8 = load i32, ptr %ret, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %ret)
+ call void @llvm.lifetime.end.p0(ptr %ret)
ret i32 %8
}
@@ -283,11 +283,11 @@ entry:
ret i64 %0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.trap()
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index f583a61..e74bf592 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -89,7 +89,7 @@ entry:
%i = alloca i32, align 4
store ptr %X, ptr %X.addr, align 8
store ptr %Y, ptr %Y.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4
br label %for.cond
@@ -99,7 +99,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -237,6 +237,6 @@ exit:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index 089511d..9679806 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -408,7 +408,7 @@ entry:
store i32 %i, ptr %i.addr, align 4
store ptr %A, ptr %A.addr, align 8
store ptr %B, ptr %B.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.start.p0(ptr %j) #3
store i32 0, ptr %j, align 4
br label %for.cond
@@ -419,11 +419,11 @@ for.cond: ; preds = %for.inc12, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %j) #3
+ call void @llvm.lifetime.end.p0(ptr %j) #3
br label %for.end14
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.start.p0(ptr %k) #3
store i32 0, ptr %k, align 4
br label %for.cond1
@@ -435,7 +435,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.end.p0(ptr %k) #3
br label %for.end
for.body4: ; preds = %for.cond1
@@ -501,13 +501,13 @@ for.end14: ; preds = %for.cond.cleanup
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
declare void @llvm.assume(i1 noundef) #2
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind ssp uwtable mustprogress
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index c7098d2..e8709a5 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -108,13 +108,13 @@ entry:
store ptr %samples, ptr %samples.addr, align 8
store double %Y, ptr %Y.addr, align 8
store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %block) #2
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.start.p0(ptr %callValue) #2
+ call void @llvm.lifetime.start.p0(ptr %v0) #2
store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #2
+ call void @llvm.lifetime.start.p0(ptr %v1) #2
store double 0.000000e+00, ptr %v1, align 8
store i32 0, ptr %i, align 4
br label %for.cond
@@ -169,12 +169,12 @@ for.end: ; preds = %for.cond
%15 = load double, ptr %v0, align 8
%16 = load double, ptr %v1, align 8
%add5 = fadd fast double %15, %16
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #2
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %v1) #2
+ call void @llvm.lifetime.end.p0(ptr %v0) #2
+ call void @llvm.lifetime.end.p0(ptr %callValue) #2
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #2
+ call void @llvm.lifetime.end.p0(ptr %block) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
ret double %add5
}
@@ -305,13 +305,13 @@ entry:
store ptr %samples, ptr %samples.addr, align 8
store double %Y, ptr %Y.addr, align 8
store double %Z, ptr %Z.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4
- call void @llvm.lifetime.start.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.start.p0(i64 8, ptr %v0) #4
+ call void @llvm.lifetime.start.p0(ptr %i) #4
+ call void @llvm.lifetime.start.p0(ptr %block) #4
+ call void @llvm.lifetime.start.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.start.p0(ptr %callValue) #4
+ call void @llvm.lifetime.start.p0(ptr %v0) #4
store double 0.000000e+00, ptr %v0, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %v1) #4
+ call void @llvm.lifetime.start.p0(ptr %v1) #4
store double 0.000000e+00, ptr %v1, align 8
store i32 0, ptr %block, align 4
br label %for.cond
@@ -389,19 +389,19 @@ for.end10: ; preds = %for.cond
%21 = load double, ptr %v0, align 8
%22 = load double, ptr %v1, align 8
%add11 = fadd fast double %21, %22
- call void @llvm.lifetime.end.p0(i64 8, ptr %v1) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %v0) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %callValue) #4
- call void @llvm.lifetime.end.p0(i64 8, ptr %rngVal) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %block) #4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4
+ call void @llvm.lifetime.end.p0(ptr %v1) #4
+ call void @llvm.lifetime.end.p0(ptr %v0) #4
+ call void @llvm.lifetime.end.p0(ptr %callValue) #4
+ call void @llvm.lifetime.end.p0(ptr %rngVal) #4
+ call void @llvm.lifetime.end.p0(ptr %block) #4
+ call void @llvm.lifetime.end.p0(ptr %i) #4
ret double %add11
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @resample(i32 noundef, ptr noundef)
declare double @llvm.exp2.f64(double)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index d55559d..258ef63 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -525,9 +525,9 @@ entry:
store ptr %dct, ptr %dct.addr, align 8
store ptr %mf, ptr %mf.addr, align 8
store ptr %bias, ptr %bias.addr, align 8
- call void @llvm.lifetime.start.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.start.p0(ptr %nz) #2
store i32 0, ptr %nz, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4
br label %for.cond
@@ -537,7 +537,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -636,13 +636,13 @@ for.end: ; preds = %for.cond.cleanup
%lnot = xor i1 %tobool, true
%lnot34 = xor i1 %lnot, true
%lnot.ext = zext i1 %lnot34 to i32
- call void @llvm.lifetime.end.p0(i64 4, ptr %nz) #2
+ call void @llvm.lifetime.end.p0(ptr %nz) #2
ret i32 %lnot.ext
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
index a201983..d19242f 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/slpordering.ll
@@ -136,14 +136,14 @@ entry:
store i32 %ip1, ptr %ip1.addr, align 4, !tbaa !8
store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
store i32 %ip2, ptr %ip2.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 64, ptr %emp) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #2
+ call void @llvm.lifetime.start.p0(ptr %emp) #2
+ call void @llvm.lifetime.start.p0(ptr %r0) #2
+ call void @llvm.lifetime.start.p0(ptr %r1) #2
+ call void @llvm.lifetime.start.p0(ptr %r2) #2
+ call void @llvm.lifetime.start.p0(ptr %r3) #2
+ call void @llvm.lifetime.start.p0(ptr %sum) #2
store i32 0, ptr %sum, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !8
br label %for.cond
@@ -153,7 +153,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -241,22 +241,22 @@ for.body: ; preds = %for.cond
%shl42 = shl i32 %sub41, 16
%rdd43 = add nsw i32 %sub36, %shl42
store i32 %rdd43, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.start.p0(ptr %e0) #2
%33 = load i32, ptr %r0, align 4, !tbaa !8
%34 = load i32, ptr %r1, align 4, !tbaa !8
%rdd44 = add i32 %33, %34
store i32 %rdd44, ptr %e0, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e1) #2
+ call void @llvm.lifetime.start.p0(ptr %e1) #2
%35 = load i32, ptr %r0, align 4, !tbaa !8
%36 = load i32, ptr %r1, align 4, !tbaa !8
%sub45 = sub i32 %35, %36
store i32 %sub45, ptr %e1, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e2) #2
+ call void @llvm.lifetime.start.p0(ptr %e2) #2
%37 = load i32, ptr %r2, align 4, !tbaa !8
%38 = load i32, ptr %r3, align 4, !tbaa !8
%rdd46 = add i32 %37, %38
store i32 %rdd46, ptr %e2, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e3) #2
+ call void @llvm.lifetime.start.p0(ptr %e3) #2
%39 = load i32, ptr %r2, align 4, !tbaa !8
%40 = load i32, ptr %r3, align 4, !tbaa !8
%sub47 = sub i32 %39, %40
@@ -293,10 +293,10 @@ for.body: ; preds = %for.cond
%rrrayidx61 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 %idxprom60
%rrrayidx62 = getelementptr inbounds [4 x i32], ptr %rrrayidx61, i64 0, i64 3
store i32 %sub59, ptr %rrrayidx62, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e0) #2
+ call void @llvm.lifetime.end.p0(ptr %e3) #2
+ call void @llvm.lifetime.end.p0(ptr %e2) #2
+ call void @llvm.lifetime.end.p0(ptr %e1) #2
+ call void @llvm.lifetime.end.p0(ptr %e0) #2
br label %for.inc
for.inc: ; preds = %for.body
@@ -316,7 +316,7 @@ for.inc: ; preds = %for.body
br label %for.cond, !llvm.loop !11
for.end: ; preds = %for.cond.cleanup
- call void @llvm.lifetime.start.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.start.p0(ptr %i65) #2
store i32 0, ptr %i65, align 4, !tbaa !8
br label %for.cond66
@@ -326,11 +326,11 @@ for.cond66: ; preds = %for.inc114, %for.en
br i1 %cmp67, label %for.body70, label %for.cond.cleanup69
for.cond.cleanup69: ; preds = %for.cond66
- call void @llvm.lifetime.end.p0(i64 4, ptr %i65) #2
+ call void @llvm.lifetime.end.p0(ptr %i65) #2
br label %for.end116
for.body70: ; preds = %for.cond66
- call void @llvm.lifetime.start.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.start.p0(ptr %e071) #2
%rrrayidx72 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
%59 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom73 = sext i32 %59 to i64
@@ -343,7 +343,7 @@ for.body70: ; preds = %for.cond66
%62 = load i32, ptr %rrrayidx77, align 4, !tbaa !8
%rdd78 = add i32 %60, %62
store i32 %rdd78, ptr %e071, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e179) #2
+ call void @llvm.lifetime.start.p0(ptr %e179) #2
%rrrayidx80 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 0
%63 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom81 = sext i32 %63 to i64
@@ -356,7 +356,7 @@ for.body70: ; preds = %for.cond66
%66 = load i32, ptr %rrrayidx85, align 4, !tbaa !8
%sub86 = sub i32 %64, %66
store i32 %sub86, ptr %e179, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e287) #2
+ call void @llvm.lifetime.start.p0(ptr %e287) #2
%rrrayidx88 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
%67 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom89 = sext i32 %67 to i64
@@ -369,7 +369,7 @@ for.body70: ; preds = %for.cond66
%70 = load i32, ptr %rrrayidx93, align 4, !tbaa !8
%rdd94 = add i32 %68, %70
store i32 %rdd94, ptr %e287, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %e395) #2
+ call void @llvm.lifetime.start.p0(ptr %e395) #2
%rrrayidx96 = getelementptr inbounds [4 x [4 x i32]], ptr %emp, i64 0, i64 2
%71 = load i32, ptr %i65, align 4, !tbaa !8
%idxprom97 = sext i32 %71 to i64
@@ -398,10 +398,10 @@ for.body70: ; preds = %for.cond66
%82 = load i32, ptr %e395, align 4, !tbaa !8
%sub106 = sub nsw i32 %81, %82
store i32 %sub106, ptr %r3, align 4, !tbaa !8
- call void @llvm.lifetime.end.p0(i64 4, ptr %e395) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e287) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e179) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %e071) #2
+ call void @llvm.lifetime.end.p0(ptr %e395) #2
+ call void @llvm.lifetime.end.p0(ptr %e287) #2
+ call void @llvm.lifetime.end.p0(ptr %e179) #2
+ call void @llvm.lifetime.end.p0(ptr %e071) #2
%83 = load i32, ptr %r0, align 4, !tbaa !8
%call = call i32 @twoabs(i32 noundef %83)
%84 = load i32, ptr %r1, align 4, !tbaa !8
@@ -432,20 +432,20 @@ for.end116: ; preds = %for.cond.cleanup69
%shr = lshr i32 %90, 16
%rdd119 = add i32 %conv118, %shr
%shr120 = lshr i32 %rdd119, 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r3) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r2) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r1) #2
- call void @llvm.lifetime.end.p0(i64 4, ptr %r0) #2
- call void @llvm.lifetime.end.p0(i64 64, ptr %emp) #2
+ call void @llvm.lifetime.end.p0(ptr %sum) #2
+ call void @llvm.lifetime.end.p0(ptr %r3) #2
+ call void @llvm.lifetime.end.p0(ptr %r2) #2
+ call void @llvm.lifetime.end.p0(ptr %r1) #2
+ call void @llvm.lifetime.end.p0(ptr %r0) #2
+ call void @llvm.lifetime.end.p0(ptr %emp) #2
ret i32 %shr120
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define internal i32 @twoabs(i32 noundef %r) #0 {
@@ -453,7 +453,7 @@ entry:
%r.addr = alloca i32, align 4
%s = alloca i32, align 4
store i32 %r, ptr %r.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.start.p0(ptr %s) #2
%0 = load i32, ptr %r.addr, align 4, !tbaa !8
%shr = lshr i32 %0, 15
%rnd = and i32 %shr, 65537
@@ -464,7 +464,7 @@ entry:
%rdd = add i32 %1, %2
%3 = load i32, ptr %s, align 4, !tbaa !8
%xor = xor i32 %rdd, %3
- call void @llvm.lifetime.end.p0(i64 4, ptr %s) #2
+ call void @llvm.lifetime.end.p0(ptr %s) #2
ret i32 %xor
}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
index 3496520..0967736 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/udotabd.ll
@@ -379,9 +379,9 @@ entry:
store i32 %s_p1, ptr %s_p1.addr, align 4, !tbaa !9
store ptr %p2, ptr %p2.addr, align 8, !tbaa !4
store i32 %s_p2, ptr %s_p2.addr, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %i_sum) #3
+ call void @llvm.lifetime.start.p0(ptr %i_sum) #3
store i32 0, ptr %i_sum, align 4, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.start.p0(ptr %y) #3
store i32 0, ptr %y, align 4, !tbaa !9
br label %for.cond
@@ -392,11 +392,11 @@ for.cond: ; preds = %for.inc10, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %y) #3
+ call void @llvm.lifetime.end.p0(ptr %y) #3
br label %for.end12
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x) #3
store i32 0, ptr %x, align 4, !tbaa !9
br label %for.cond1
@@ -407,7 +407,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) #3
+ call void @llvm.lifetime.end.p0(ptr %x) #3
br label %for.end
for.body4: ; preds = %for.cond1
@@ -458,18 +458,18 @@ for.inc10: ; preds = %for.end
for.end12: ; preds = %for.cond.cleanup
%16 = load i32, ptr %i_sum, align 4, !tbaa !9
store i32 1, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i_sum) #3
+ call void @llvm.lifetime.end.p0(ptr %i_sum) #3
ret i32 %16
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr) #1
+declare void @llvm.lifetime.start.p0(ptr) #1
; Function Attrs: nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare i32 @llvm.abs.i32(i32, i1 immarg) #2
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr) #1
+declare void @llvm.lifetime.end.p0(ptr) #1
attributes #0 = { nounwind uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+bf16,+bti,+ccidx,+complxnum,+crc,+dit,+dotprod,+ete,+flagm,+fp-armv8,+fp16fml,+fullfp16,+i8mm,+jsconv,+lse,+mte,+neon,+pauth,+perfmon,+predres,+rand,+ras,+rcpc,+rdm,+sb,+spe,+ssbs,+sve,+sve-bitperm,+sve2,+trbe,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a,+v9a,-fmv" }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
index 76d9d14..0023dea 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
@@ -43,7 +43,7 @@ entry:
store ptr %pSrcB, ptr %pSrcB.addr, align 4
store ptr %pDst, ptr %pDst.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt)
+ call void @llvm.lifetime.start.p0(ptr %blkCnt)
%0 = load i32, ptr %blockSize.addr, align 4
store i32 %0, ptr %blkCnt, align 4
br label %while.cond
@@ -78,7 +78,7 @@ while.body: ; preds = %while.cond
br label %while.cond
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt)
+ call void @llvm.lifetime.end.p0(ptr %blkCnt)
ret void
}
@@ -102,13 +102,13 @@ land.lhs.true: ; preds = %entry
br i1 %cmp1, label %if.then, label %if.end10
if.then: ; preds = %land.lhs.true
- call void @llvm.lifetime.start.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.start.p0(ptr %max)
%2 = load i32, ptr %sat.addr, align 4
%sub = sub i32 %2, 1
%shl = shl i32 1, %sub
%sub2 = sub i32 %shl, 1
store i32 %sub2, ptr %max, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
%3 = load i32, ptr %max, align 4
%sub3 = sub nsw i32 -1, %3
store i32 %sub3, ptr %min, align 4
@@ -143,8 +143,8 @@ if.end8: ; preds = %if.end
br label %cleanup
cleanup: ; preds = %if.end8, %if.then7, %if.then5
- call void @llvm.lifetime.end.p0(i64 4, ptr %min)
- call void @llvm.lifetime.end.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.end.p0(ptr %min)
+ call void @llvm.lifetime.end.p0(ptr %max)
%cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4
switch i32 %cleanup.dest, label %unreachable [
i32 0, label %cleanup.cont
@@ -167,8 +167,8 @@ unreachable: ; preds = %cleanup
unreachable
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { alwaysinline nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
index 2ab6f2b..436f848 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
@@ -59,8 +59,8 @@ entry:
store i8 %value, ptr %value.addr, align 1, !tbaa !3
store ptr %pDst, ptr %pDst.addr, align 4, !tbaa !6
store i32 %blockSize, ptr %blockSize.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %packedValue) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %packedValue) #3
%0 = load i8, ptr %value.addr, align 1, !tbaa !3
%conv = sext i8 %0 to i32
%shl = shl i32 %conv, 0
@@ -122,13 +122,13 @@ while.body16: ; preds = %while.cond13
br label %while.cond13, !llvm.loop !12
while.end18: ; preds = %while.cond13
- call void @llvm.lifetime.end.p0(i64 4, ptr %packedValue) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %packedValue) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: alwaysinline nounwind
define internal void @write_q7x4_ia(ptr %pQ7, i32 %value) #2 {
@@ -138,7 +138,7 @@ entry:
%val = alloca i32, align 4
store ptr %pQ7, ptr %pQ7.addr, align 4, !tbaa !6
store i32 %value, ptr %value.addr, align 4, !tbaa !8
- call void @llvm.lifetime.start.p0(i64 4, ptr %val) #3
+ call void @llvm.lifetime.start.p0(ptr %val) #3
%0 = load i32, ptr %value.addr, align 4, !tbaa !8
store i32 %0, ptr %val, align 4, !tbaa !8
%1 = load i32, ptr %val, align 4, !tbaa !8
@@ -175,12 +175,12 @@ entry:
%14 = load ptr, ptr %13, align 4, !tbaa !6
%add.ptr = getelementptr inbounds i8, ptr %14, i32 4
store ptr %add.ptr, ptr %13, align 4, !tbaa !6
- call void @llvm.lifetime.end.p0(i64 4, ptr %val) #3
+ call void @llvm.lifetime.end.p0(ptr %val) #3
ret void
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m0plus" "target-features"="+armv6-m,+strict-align,+thumb-mode,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-dotprod,-dsp,-fp16fml,-fullfp16,-hwdiv,-hwdiv-arm,-i8mm,-lob,-mve,-mve.fp,-ras,-sb,-sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index b932a69..6862d8b 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -60,9 +60,9 @@ entry:
store ptr %pSrc, ptr %pSrc.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
store ptr %pResult, ptr %pResult.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
- call void @llvm.lifetime.start.p0(i64 16, ptr %vecSrc) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %sum) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %vecSrc) #3
+ call void @llvm.lifetime.start.p0(ptr %sum) #3
store i32 0, ptr %sum, align 4
%0 = load i32, ptr %blockSize.addr, align 4
%shr = lshr i32 %0, 4
@@ -123,15 +123,15 @@ while.end5: ; preds = %while.cond1
%conv6 = trunc i32 %div to i8
%18 = load ptr, ptr %pResult.addr, align 4
store i8 %conv6, ptr %18, align 1
- call void @llvm.lifetime.end.p0(i64 4, ptr %sum) #3
- call void @llvm.lifetime.end.p0(i64 16, ptr %vecSrc) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %sum) #3
+ call void @llvm.lifetime.end.p0(ptr %vecSrc) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare i32 @llvm.arm.mve.addv.v16i8(<16 x i8>, i32) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "approx-func-fp-math"="true" "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-pacbti,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nocallback nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 9d613b8..42fdafb 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -88,7 +88,7 @@ entry:
store ptr %pSrcB, ptr %pSrcB.addr, align 4
store ptr %pDst, ptr %pDst.addr, align 4
store i32 %blockSize, ptr %blockSize.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.start.p0(ptr %blkCnt) #3
%0 = load i32, ptr %blockSize.addr, align 4
store i32 %0, ptr %blkCnt, align 4
br label %while.cond
@@ -123,11 +123,11 @@ while.body: ; preds = %while.cond
br label %while.cond
while.end: ; preds = %while.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %blkCnt) #3
+ call void @llvm.lifetime.end.p0(ptr %blkCnt) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
define internal i32 @__SSAT(i32 %val, i32 %sat) #2 {
entry:
@@ -149,13 +149,13 @@ land.lhs.true: ; preds = %entry
br i1 %cmp1, label %if.then, label %if.end10
if.then: ; preds = %land.lhs.true
- call void @llvm.lifetime.start.p0(i64 4, ptr %max) #3
+ call void @llvm.lifetime.start.p0(ptr %max) #3
%2 = load i32, ptr %sat.addr, align 4
%sub = sub i32 %2, 1
%shl = shl i32 1, %sub
%sub2 = sub i32 %shl, 1
store i32 %sub2, ptr %max, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min) #3
+ call void @llvm.lifetime.start.p0(ptr %min) #3
%3 = load i32, ptr %max, align 4
%sub3 = sub nsw i32 -1, %3
store i32 %sub3, ptr %min, align 4
@@ -190,8 +190,8 @@ if.end8: ; preds = %if.end
br label %cleanup
cleanup: ; preds = %if.end8, %if.then7, %if.then5
- call void @llvm.lifetime.end.p0(i64 4, ptr %min) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %max) #3
+ call void @llvm.lifetime.end.p0(ptr %min) #3
+ call void @llvm.lifetime.end.p0(ptr %max) #3
%cleanup.dest = load i32, ptr %cleanup.dest.slot, align 4
switch i32 %cleanup.dest, label %unreachable [
i32 0, label %cleanup.cont
@@ -214,7 +214,7 @@ unreachable: ; preds = %cleanup
unreachable
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp0,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
index 5178e9f..7fe3f33 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-final-loop-unrolling-2.ll
@@ -60,9 +60,9 @@ bb:
%i5 = alloca ptr, align 8
store i32 %arg, ptr %i, align 4, !tbaa !5
store ptr %arg1, ptr %i2, align 8, !tbaa !9
- call void @llvm.lifetime.start.p0(i64 8, ptr %i3) #3
+ call void @llvm.lifetime.start.p0(ptr %i3) #3
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i3, ptr align 4 @global, i64 8, i1 false)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i4) #3
+ call void @llvm.lifetime.start.p0(ptr %i4) #3
store i32 0, ptr %i4, align 4, !tbaa !5
br label %bb6
@@ -75,11 +75,11 @@ bb6: ; preds = %bb22, %bb
br i1 %i11, label %bb13, label %bb12
bb12: ; preds = %bb6
- call void @llvm.lifetime.end.p0(i64 4, ptr %i4) #3
+ call void @llvm.lifetime.end.p0(ptr %i4) #3
br label %bb25
bb13: ; preds = %bb6
- call void @llvm.lifetime.start.p0(i64 8, ptr %i5) #3
+ call void @llvm.lifetime.start.p0(ptr %i5) #3
%i14 = load i32, ptr %i4, align 4, !tbaa !5
%i15 = srem i32 %i14, 2
%i16 = sext i32 %i15 to i64
@@ -90,7 +90,7 @@ bb13: ; preds = %bb6
%i20 = load i32, ptr %i19, align 4, !tbaa !5
%i21 = mul nsw i32 %i20, %i18
store i32 %i21, ptr %i19, align 4, !tbaa !5
- call void @llvm.lifetime.end.p0(i64 8, ptr %i5) #3
+ call void @llvm.lifetime.end.p0(ptr %i5) #3
br label %bb22
bb22: ; preds = %bb13
@@ -102,12 +102,12 @@ bb22: ; preds = %bb13
bb25: ; preds = %bb12
%i26 = load ptr, ptr %i2, align 8, !tbaa !9
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %i26, ptr align 4 %i3, i64 8, i1 false), !tbaa.struct !13
- call void @llvm.lifetime.end.p0(i64 8, ptr %i3) #3
+ call void @llvm.lifetime.end.p0(ptr %i3) #3
ret void
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: argmemonly nocallback nofree nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
@@ -126,7 +126,7 @@ bb:
}
; Function Attrs: argmemonly nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: mustprogress nounwind uwtable
define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @widget(ptr noundef nonnull align 4 dereferenceable(8) %arg, i64 noundef %arg1) #0 comdat($_ZNSt14__array_traitsIiLm2EE6_S_refERA2_Kim) align 2 {
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
index c6dc7b3..51f2a36 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/SROA-after-loop-unrolling.ll
@@ -50,14 +50,14 @@ entry:
%__end15 = alloca ptr
%elt11 = alloca ptr
store i32 %cnt, ptr %cnt.addr
- call void @llvm.lifetime.start.p0(i64 24, ptr %arr)
- call void @llvm.lifetime.start.p0(i64 8, ptr %__range1)
+ call void @llvm.lifetime.start.p0(ptr %arr)
+ call void @llvm.lifetime.start.p0(ptr %__range1)
store ptr %arr, ptr %__range1
- call void @llvm.lifetime.start.p0(i64 8, ptr %__begin1)
+ call void @llvm.lifetime.start.p0(ptr %__begin1)
%0 = load ptr, ptr %__range1
%call = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %0)
store ptr %call, ptr %__begin1
- call void @llvm.lifetime.start.p0(i64 8, ptr %__end1)
+ call void @llvm.lifetime.start.p0(ptr %__end1)
%1 = load ptr, ptr %__range1
%call1 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %1)
store ptr %call1, ptr %__end1
@@ -70,13 +70,13 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %__end1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__begin1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__range1)
+ call void @llvm.lifetime.end.p0(ptr %__end1)
+ call void @llvm.lifetime.end.p0(ptr %__begin1)
+ call void @llvm.lifetime.end.p0(ptr %__range1)
br label %for.end
for.body:
- call void @llvm.lifetime.start.p0(i64 8, ptr %elt)
+ call void @llvm.lifetime.start.p0(ptr %elt)
%4 = load ptr, ptr %__begin1
store ptr %4, ptr %elt
%5 = load i32, ptr %cnt.addr
@@ -84,7 +84,7 @@ for.body:
store i32 %inc, ptr %cnt.addr
%6 = load ptr, ptr %elt
store i32 %inc, ptr %6
- call void @llvm.lifetime.end.p0(i64 8, ptr %elt)
+ call void @llvm.lifetime.end.p0(ptr %elt)
br label %for.inc
for.inc:
@@ -94,13 +94,13 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.start.p0(i64 8, ptr %__range12)
+ call void @llvm.lifetime.start.p0(ptr %__range12)
store ptr %arr, ptr %__range12
- call void @llvm.lifetime.start.p0(i64 8, ptr %__begin13)
+ call void @llvm.lifetime.start.p0(ptr %__begin13)
%8 = load ptr, ptr %__range12
%call4 = call ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %8)
store ptr %call4, ptr %__begin13
- call void @llvm.lifetime.start.p0(i64 8, ptr %__end15)
+ call void @llvm.lifetime.start.p0(ptr %__end15)
%9 = load ptr, ptr %__range12
%call6 = call ptr @_ZNSt5arrayIiLm6EE3endEv(ptr %9)
store ptr %call6, ptr %__end15
@@ -113,19 +113,19 @@ for.cond7:
br i1 %cmp8, label %for.body10, label %for.cond.cleanup9
for.cond.cleanup9:
- call void @llvm.lifetime.end.p0(i64 8, ptr %__end15)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__begin13)
- call void @llvm.lifetime.end.p0(i64 8, ptr %__range12)
+ call void @llvm.lifetime.end.p0(ptr %__end15)
+ call void @llvm.lifetime.end.p0(ptr %__begin13)
+ call void @llvm.lifetime.end.p0(ptr %__range12)
br label %for.end14
for.body10:
- call void @llvm.lifetime.start.p0(i64 8, ptr %elt11)
+ call void @llvm.lifetime.start.p0(ptr %elt11)
%12 = load ptr, ptr %__begin13
store ptr %12, ptr %elt11
%13 = load ptr, ptr %elt11
%14 = load i32, ptr %13
call void @_Z3usei(i32 %14)
- call void @llvm.lifetime.end.p0(i64 8, ptr %elt11)
+ call void @llvm.lifetime.end.p0(ptr %elt11)
br label %for.inc12
for.inc12:
@@ -135,11 +135,11 @@ for.inc12:
br label %for.cond7
for.end14:
- call void @llvm.lifetime.end.p0(i64 24, ptr %arr)
+ call void @llvm.lifetime.end.p0(ptr %arr)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local ptr @_ZNSt5arrayIiLm6EE5beginEv(ptr %this) {
entry:
@@ -160,7 +160,7 @@ entry:
ret ptr %add.ptr
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare dso_local void @_Z3usei(i32)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
index dfad534..00453e7 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
@@ -143,7 +143,7 @@ entry:
%j = alloca i64, align 8
store ptr %data, ptr %data.addr, align 8, !tbaa !3
store i64 %numElems, ptr %numElems.addr, align 8, !tbaa !7
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8, !tbaa !7
br label %for.cond
@@ -154,11 +154,11 @@ for.cond:
for.cond.cleanup:
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end8
for.body:
- call void @llvm.lifetime.start.p0(i64 8, ptr %j)
+ call void @llvm.lifetime.start.p0(ptr %j)
store i64 0, ptr %j, align 8, !tbaa !7
br label %for.cond1
@@ -170,7 +170,7 @@ for.cond1:
for.cond.cleanup3:
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 8, ptr %j)
+ call void @llvm.lifetime.end.p0(ptr %j)
br label %for.end
for.body4:
@@ -201,7 +201,7 @@ for.end8:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
define linkonce_odr dso_local noundef nonnull align 4 dereferenceable(4) ptr @_ZNSt6vectorIiSaIiEEixEm(ptr noundef nonnull align 8 dereferenceable(24) %this, i64 noundef %__n) comdat align 2 {
entry:
@@ -217,7 +217,7 @@ entry:
ret ptr %add.ptr
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{i32 7, !"uwtable", i32 2}
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
index 1a4af5a..3c54ed9 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/loop-vectorizer-noalias.ll
@@ -63,10 +63,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87"}
;.
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
index 362708b..0c58705 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr61061.ll
@@ -13,7 +13,7 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) {
; CHECK-NEXT: ret <2 x i64> [[TMP1]]
;
%ptr = alloca <2 x i64>, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %ptr)
+ call void @llvm.lifetime.start.p0(ptr nonnull %ptr)
%bc0 = bitcast <2 x i64> %vect to <16 x i8>
%bc1 = bitcast <2 x i64> %vect to <16 x i8>
%bc2 = bitcast <2 x i64> %vect to <16 x i8>
@@ -62,8 +62,8 @@ define <2 x i64> @PR61061(<2 x i64> noundef %vect) {
store i8 %elt2, ptr %ptr14, align 2
store i8 %elt3, ptr %ptr15, align 1
%base = load <2 x i64>, ptr %ptr, align 16
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %ptr)
+ call void @llvm.lifetime.end.p0(ptr nonnull %ptr)
ret <2 x i64> %base
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
index be7f4c2..cb37846 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/preserve-access-group.ll
@@ -79,9 +79,9 @@ entry:
store ptr %face_cell, ptr %face_cell.addr, align 8, !tbaa !10
store ptr %x, ptr %x.addr, align 8, !tbaa !10
store ptr %y, ptr %y.addr, align 8, !tbaa !10
- call void @llvm.lifetime.start.p0(i64 4, ptr %il) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %ir) #3
- call void @llvm.lifetime.start.p0(i64 4, ptr %iface) #3
+ call void @llvm.lifetime.start.p0(ptr %il) #3
+ call void @llvm.lifetime.start.p0(ptr %ir) #3
+ call void @llvm.lifetime.start.p0(ptr %iface) #3
store i32 0, ptr %iface, align 4, !tbaa !6
br label %for.cond
@@ -92,7 +92,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 4, ptr %iface) #3, !llvm.access.group !12
+ call void @llvm.lifetime.end.p0(ptr %iface) #3, !llvm.access.group !12
br label %for.end
for.body:
@@ -134,12 +134,12 @@ for.inc:
br label %for.cond, !llvm.loop !15
for.end:
- call void @llvm.lifetime.end.p0(i64 4, ptr %ir) #3
- call void @llvm.lifetime.end.p0(i64 4, ptr %il) #3
+ call void @llvm.lifetime.end.p0(ptr %ir) #3
+ call void @llvm.lifetime.end.p0(ptr %il) #3
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
define linkonce_odr noundef nonnull align 8 dereferenceable(8) ptr @max(ptr noundef nonnull align 8 dereferenceable(8) %__a, ptr noundef nonnull align 8 dereferenceable(8) %__b) #2 {
entry:
@@ -170,7 +170,7 @@ return:
ret ptr %6
}
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { mustprogress "target-cpu" = "skylake-avx512" }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
index f60bc26..69a46b2 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv-nounroll.ll
@@ -39,7 +39,7 @@ entry:
%i = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8, !tbaa !3
store float %b, ptr %b.addr, align 4, !tbaa !7
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !9
br label %for.cond
@@ -49,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body: ; preds = %for.cond
@@ -73,8 +73,8 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" "unsafe-fp-math"="true" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
index 85f6fce..f6e8fcd 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
@@ -75,7 +75,7 @@ entry:
%ref.tmp7 = alloca %union.ElementWiseAccess, align 16
%ref.tmp12 = alloca %union.ElementWiseAccess, align 16
store ptr %V, ptr %V.addr, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp) #4
%0 = load ptr, ptr %V.addr, align 8
%call = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %0)
%coerce.dive = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp, i32 0, i32 0
@@ -87,7 +87,7 @@ entry:
store double %4, ptr %3, align 8
%call1 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp, i32 noundef 0)
%vecinit = insertelement <4 x float> undef, float %call1, i32 0
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp2) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp2) #4
%5 = load ptr, ptr %V.addr, align 8
%call3 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %5)
%coerce.dive4 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp2, i32 0, i32 0
@@ -99,7 +99,7 @@ entry:
store double %9, ptr %8, align 8
%call5 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp2, i32 noundef 1)
%vecinit6 = insertelement <4 x float> %vecinit, float %call5, i32 1
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp7) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp7) #4
%10 = load ptr, ptr %V.addr, align 8
%call8 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %10)
%coerce.dive9 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp7, i32 0, i32 0
@@ -111,7 +111,7 @@ entry:
store double %14, ptr %13, align 8
%call10 = call noundef float @ElementWiseAccess5getAt(ptr noundef nonnull align 16 dereferenceable(16) %ref.tmp7, i32 noundef 2)
%vecinit11 = insertelement <4 x float> %vecinit6, float %call10, i32 2
- call void @llvm.lifetime.start.p0(i64 16, ptr %ref.tmp12) #4
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp12) #4
%15 = load ptr, ptr %V.addr, align 8
%call13 = call { double, double } @castToElementWiseAccess_ByVal(ptr noundef nonnull align 16 dereferenceable(16) %15)
%coerce.dive14 = getelementptr inbounds %union.ElementWiseAccess, ptr %ref.tmp12, i32 0, i32 0
@@ -125,10 +125,10 @@ entry:
%vecinit16 = insertelement <4 x float> %vecinit11, float %call15, i32 3
store <4 x float> %vecinit16, ptr %.compoundliteral, align 16
%20 = load <4 x float>, ptr %.compoundliteral, align 16
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp12) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp7) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp2) #4
- call void @llvm.lifetime.end.p0(i64 16, ptr %ref.tmp) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp12) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp7) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp2) #4
+ call void @llvm.lifetime.end.p0(ptr %ref.tmp) #4
ret <4 x float> %20
}
@@ -144,8 +144,8 @@ entry:
ret { double, double } %1
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #3
define internal noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #1 {
diff --git a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
index 4d0f039..dd9ead4 100644
--- a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
+++ b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
@@ -130,8 +130,8 @@ for.end34:
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { nounwind ssp uwtable "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
index 1297dbe..9a6cad4 100644
--- a/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
+++ b/llvm/test/Transforms/PhaseOrdering/dse-ephemeral-value-captures.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes='function(dse),cgscc(inline),function(sroa,gvn,sccp)' -S %s | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @llvm.assume(i1 noundef)
@@ -35,10 +35,10 @@ define i32 @test() {
;
entry:
%a = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.start.p0(ptr nonnull %a)
store i32 1, ptr %a, align 4
%res = call i1 @check_cond(ptr %a)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %a)
+ call void @llvm.lifetime.end.p0(ptr nonnull %a)
call void @llvm.assume(i1 %res)
ret i32 0
}
diff --git a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
index ae98fe6..c6d1cbd 100644
--- a/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
+++ b/llvm/test/Transforms/PhaseOrdering/gvn-replacement-vs-hoist.ll
@@ -35,7 +35,7 @@ entry:
%i = alloca i32, align 4
store ptr %a, ptr %a.addr, align 8
store i32 %beam, ptr %beam.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4
br label %for.cond
@@ -45,7 +45,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body: ; preds = %for.cond
@@ -85,6 +85,6 @@ for.end: ; preds = %for.cond.cleanup
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
index cc20233a..84cbad3 100644
--- a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
@@ -73,23 +73,23 @@ bb:
%i = alloca %0, align 8
%i1 = alloca %0, align 8
%i2 = alloca %0, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false)
call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i2, ptr align 8 %i, i64 24, i1 false)
call void @_Z7escape01S(ptr byval(%0) align 8 %i2)
%i9 = load ptr, ptr %i, align 8
- call void @llvm.lifetime.end.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret ptr %i9
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
declare dso_local void @_Z7escape01S(ptr byval(%0) align 8)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) {
; CHECK-LABEL: @_Z3bar1S(
@@ -112,7 +112,7 @@ define dso_local ptr @_Z3bar1S(ptr byval(%0) align 8 %arg) {
bb:
%i = alloca %0, align 8
%i1 = alloca %0, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %i1, ptr align 8 %arg, i64 24, i1 false)
call void @_Z3gen1S(ptr sret(%0) align 8 %i, ptr byval(%0) align 8 %i1)
%i5 = call i32 @_Z4condv()
@@ -133,7 +133,7 @@ bb10:
bb13:
%i15 = load ptr, ptr %i, align 8
- call void @llvm.lifetime.end.p0(i64 24, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
ret ptr %i15
}
diff --git a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
index 1239b18..c5dbc42 100644
--- a/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lifetime-sanitizer.ll
@@ -7,8 +7,8 @@
; RUN: opt < %s -passes='default<O2>' -S | FileCheck %s --check-prefixes=CHECK,OPT
; RUN: opt < %s -passes="default<O3>" -S | FileCheck %s --check-prefixes=CHECK,OPT
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @foo(ptr nocapture)
define void @asan() sanitize_address {
@@ -16,8 +16,8 @@ entry:
; CHECK-LABEL: @asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -31,8 +31,8 @@ entry:
; CHECK-LABEL: @hwasan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -46,8 +46,8 @@ entry:
; CHECK-LABEL: @msan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; CHECK: call void @llvm.lifetime.start
; CHECK-NEXT: call void @llvm.lifetime.end
@@ -61,8 +61,8 @@ entry:
; CHECK-LABEL: @no_asan(
%text = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr %text)
- call void @llvm.lifetime.end.p0(i64 1, ptr %text)
+ call void @llvm.lifetime.start.p0(ptr %text)
+ call void @llvm.lifetime.end.p0(ptr %text)
; OPT-NOT: call void @llvm.lifetime
; NOOPT: call void @llvm.lifetime.start
; NOOPT-NEXT: call void @llvm.lifetime.end
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
index 45f18dd..ae0e591 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-access-checks.ll
@@ -52,7 +52,7 @@ entry:
%elems.coerce.fca.1.extract = extractvalue [2 x i64] %elems.coerce, 1
%elems.coerce.fca.1.gep = getelementptr inbounds [2 x i64], ptr %elems, i64 0, i64 1
store i64 %elems.coerce.fca.1.extract, ptr %elems.coerce.fca.1.gep, align 8
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__begin1) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %__begin1) #6
%0 = load ptr, ptr %elems, align 8
%__size_.i.i = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
%1 = load i64, ptr %__size_.i.i, align 8
@@ -66,7 +66,7 @@ entry:
br i1 %cmp.not.i.i.i.i, label %error, label %check.2
check.2:
- call void @llvm.lifetime.start.p0(i64 24, ptr nonnull %__end1) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %__end1) #6
%l4 = load ptr, ptr %elems, align 8
%__size_.i.i4 = getelementptr inbounds %"class.std::__1::span", ptr %elems, i64 0, i32 1
%l5 = load i64, ptr %__size_.i.i4, align 8
@@ -90,8 +90,8 @@ for.cond:
br i1 %cmp.i, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__end1)
- call void @llvm.lifetime.end.p0(i64 24, ptr nonnull %__begin1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %__end1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %__begin1)
ret void
for.body: ; preds = %for.cond
@@ -115,11 +115,11 @@ for.latch:
declare void @error()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @use(ptr noundef nonnull align 4 dereferenceable(4))
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; -------------------------------------------------------------------------
@@ -160,11 +160,11 @@ entry:
%count = alloca i64, align 8
%i = alloca i64, align 8
store ptr %vec, ptr %vec.addr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.start.p0(ptr %count)
%0 = load ptr, ptr %vec.addr, align 8
%call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0)
store i64 %call, ptr %count, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -175,7 +175,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
@@ -194,7 +194,7 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.end.p0(i64 8, ptr %count) #5
+ call void @llvm.lifetime.end.p0(ptr %count) #5
ret void
}
@@ -299,11 +299,11 @@ entry:
%count = alloca i64, align 8
%i = alloca i64, align 8
store ptr %vec, ptr %vec.addr, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.start.p0(ptr %count)
%0 = load ptr, ptr %vec.addr, align 8
%call = call noundef i64 @alloc(ptr noundef nonnull align 8 dereferenceable(24) %0)
store i64 %call, ptr %count, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -314,7 +314,7 @@ for.cond:
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 8, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
@@ -333,7 +333,7 @@ for.inc:
br label %for.cond
for.end:
- call void @llvm.lifetime.end.p0(i64 8, ptr %count)
+ call void @llvm.lifetime.end.p0(ptr %count)
ret void
}
@@ -376,7 +376,7 @@ entry:
%k = alloca i32, align 4
store ptr %arr, ptr %arr.addr, align 8
store i32 %len, ptr %len.addr, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.start.p0(ptr %i) #3
store i32 1, ptr %i, align 4
br label %for.cond
@@ -388,11 +388,11 @@ for.cond: ; preds = %for.inc5, %entry
for.cond.cleanup: ; preds = %for.cond
store i32 2, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #3
+ call void @llvm.lifetime.end.p0(ptr %i) #3
br label %for.end6
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.start.p0(ptr %k) #3
%2 = load i32, ptr %i, align 4
store i32 %2, ptr %k, align 4
br label %for.cond1
@@ -404,7 +404,7 @@ for.cond1: ; preds = %for.inc, %for.body
for.cond.cleanup3: ; preds = %for.cond1
store i32 5, ptr %cleanup.dest.slot, align 4
- call void @llvm.lifetime.end.p0(i64 4, ptr %k) #3
+ call void @llvm.lifetime.end.p0(ptr %k) #3
br label %for.end
for.body4: ; preds = %for.cond1
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
index c6b5e5f..5ff57ea 100644
--- a/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
+++ b/llvm/test/Transforms/PhaseOrdering/loop-rotation-vs-common-code-hoisting.ll
@@ -38,8 +38,8 @@ declare void @f0()
declare void @f1()
declare void @f2()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z4loopi(i32 %width) {
; HOIST-LABEL: @_Z4loopi(
@@ -100,7 +100,7 @@ if.then:
br label %return
if.end:
- call void @llvm.lifetime.start.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4
br label %for.cond
@@ -112,7 +112,7 @@ for.cond:
br i1 %cmp1, label %for.body, label %for.cond.cleanup
for.cond.cleanup:
- call void @llvm.lifetime.end.p0(i64 4, ptr %i)
+ call void @llvm.lifetime.end.p0(ptr %i)
br label %for.end
for.body:
diff --git a/llvm/test/Transforms/PhaseOrdering/vector-select.ll b/llvm/test/Transforms/PhaseOrdering/vector-select.ll
index 1bdd135..c228723 100644
--- a/llvm/test/Transforms/PhaseOrdering/vector-select.ll
+++ b/llvm/test/Transforms/PhaseOrdering/vector-select.ll
@@ -19,9 +19,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
store <4 x float> %extractVec1, ptr %b.addr, align 16
%extractVec3 = shufflevector <3 x i32> %c, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
store <4 x i32> %extractVec3, ptr %c.addr, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %zero) #2
+ call void @llvm.lifetime.start.p0(ptr %zero) #2
store <4 x i32> <i32 0, i32 0, i32 0, i32 undef>, ptr %zero, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %mask) #2
+ call void @llvm.lifetime.start.p0(ptr %mask) #2
%loadVec4 = load <4 x i32>, ptr %zero, align 16
%extractVec6 = shufflevector <4 x i32> %loadVec4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%loadVec48 = load <4 x i32>, ptr %c.addr, align 16
@@ -30,7 +30,7 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
%sext = sext <3 x i1> %cmp to <3 x i32>
%extractVec10 = shufflevector <3 x i32> %sext, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
store <4 x i32> %extractVec10, ptr %mask, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %res) #2
+ call void @llvm.lifetime.start.p0(ptr %res) #2
%loadVec413 = load <4 x i32>, ptr %mask, align 16
%extractVec14 = shufflevector <4 x i32> %loadVec413, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%loadVec416 = load <4 x float>, ptr %b.addr, align 16
@@ -51,9 +51,9 @@ define <3 x float> @PR52631(<3 x float> %a, <3 x float> %b, <3 x i32> %c) {
%extractVec32 = shufflevector <4 x i32> %loadVec431, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
%or = or <3 x i32> %and29, %extractVec32
%astype33 = bitcast <3 x i32> %or to <3 x float>
- call void @llvm.lifetime.end.p0(i64 16, ptr %res) #2
- call void @llvm.lifetime.end.p0(i64 16, ptr %mask) #2
- call void @llvm.lifetime.end.p0(i64 16, ptr %zero) #2
+ call void @llvm.lifetime.end.p0(ptr %res) #2
+ call void @llvm.lifetime.end.p0(ptr %mask) #2
+ call void @llvm.lifetime.end.p0(ptr %zero) #2
ret <3 x float> %astype33
}
@@ -112,5 +112,5 @@ for.end:
ret <4 x i32> %min.addr.0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/SROA/alloca-address-space.ll b/llvm/test/Transforms/SROA/alloca-address-space.ll
index 31305c8..941178f 100644
--- a/llvm/test/Transforms/SROA/alloca-address-space.ll
+++ b/llvm/test/Transforms/SROA/alloca-address-space.ll
@@ -140,7 +140,7 @@ define void @addressspace_alloca_lifetime() {
; CHECK-NEXT: ret void
;
%alloca = alloca i8, align 8, addrspace(2)
- call void @llvm.lifetime.start(i64 2, ptr addrspace(2) %alloca)
+ call void @llvm.lifetime.start(ptr addrspace(2) %alloca)
ret void
}
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 3034aaa..15803f7 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -4,8 +4,8 @@
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define i32 @test0() {
; CHECK-LABEL: @test0(
@@ -18,21 +18,21 @@ entry:
%a1 = alloca i32
%a2 = alloca float
- call void @llvm.lifetime.start.p0(i64 4, ptr %a1)
+ call void @llvm.lifetime.start.p0(ptr %a1)
store i32 0, ptr %a1
%v1 = load i32, ptr %a1
- call void @llvm.lifetime.end.p0(i64 4, ptr %a1)
+ call void @llvm.lifetime.end.p0(ptr %a1)
- call void @llvm.lifetime.start.p0(i64 4, ptr %a2)
+ call void @llvm.lifetime.start.p0(ptr %a2)
store float 0.0, ptr %a2
%v2 = load float , ptr %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
- call void @llvm.lifetime.end.p0(i64 4, ptr %a2)
+ call void @llvm.lifetime.end.p0(ptr %a2)
ret i32 %sum1
}
@@ -1102,7 +1102,7 @@ define void @PR14059.1(ptr %d) {
;
entry:
%X.sroa.0.i = alloca double, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %X.sroa.0.i)
+ call void @llvm.lifetime.start.p0(ptr %X.sroa.0.i)
; Store to the low 32-bits...
store i32 0, ptr %X.sroa.0.i, align 8
@@ -1126,7 +1126,7 @@ entry:
%accum.real.i = load double, ptr %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, ptr %d, align 8
- call void @llvm.lifetime.end.p0(i64 -1, ptr %X.sroa.0.i)
+ call void @llvm.lifetime.end.p0(ptr %X.sroa.0.i)
ret void
}
@@ -1812,7 +1812,7 @@ define void @PR25873(ptr %outData) {
;
entry:
%tmpData = alloca %struct.STest, align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.start.p0(ptr %tmpData)
store float 1.230000e+02, ptr %tmpData, align 8
%y = getelementptr inbounds %struct.STest, ptr %tmpData, i64 0, i32 0, i32 1
store float 4.560000e+02, ptr %y, align 4
@@ -1820,7 +1820,7 @@ entry:
%0 = load i64, ptr %tmpData, align 8
store i64 %0, ptr %m_posB, align 8
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %outData, ptr align 4 %tmpData, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmpData)
+ call void @llvm.lifetime.end.p0(ptr %tmpData)
ret void
}
@@ -1833,8 +1833,8 @@ define void @PR27999() unnamed_addr {
;
entry-block:
%0 = alloca [2 x i64], align 8
- call void @llvm.lifetime.start.p0(i64 16, ptr %0)
- call void @llvm.lifetime.end.p0(i64 8, ptr %0)
+ call void @llvm.lifetime.start.p0(ptr %0)
+ call void @llvm.lifetime.end.p0(ptr %0)
ret void
}
@@ -1846,7 +1846,7 @@ define void @PR29139() {
bb1:
%e.7.sroa.6.i = alloca i32, align 1
%e.7.sroa.6.0.load81.i = load i32, ptr %e.7.sroa.6.i, align 1
- call void @llvm.lifetime.end.p0(i64 2, ptr %e.7.sroa.6.i)
+ call void @llvm.lifetime.end.p0(ptr %e.7.sroa.6.i)
ret void
}
@@ -1898,8 +1898,8 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.isVoid.i64.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.isVoid.i64.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.isVoid.i64.p0(ptr nocapture)
+declare void @llvm.lifetime.end.isVoid.i64.p0(ptr nocapture)
@array = dso_local global [10 x float] zeroinitializer, align 4
define void @test29(i32 %num, i32 %tid) {
@@ -1931,7 +1931,7 @@ define void @test29(i32 %num, i32 %tid) {
;
entry:
%ra = alloca [10 x float], align 4
- call void @llvm.lifetime.start.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.start.isVoid.i64.p0(ptr nonnull %ra)
%cmp1 = icmp sgt i32 %num, 0
br i1 %cmp1, label %bb1, label %bb7
@@ -1963,7 +1963,7 @@ bb6:
br label %bb7
bb7:
- call void @llvm.lifetime.end.isVoid.i64.p0(i64 40, ptr nonnull %ra)
+ call void @llvm.lifetime.end.isVoid.i64.p0(ptr nonnull %ra)
ret void
}
diff --git a/llvm/test/Transforms/SROA/dead-inst.ll b/llvm/test/Transforms/SROA/dead-inst.ll
index 44ae821..bf47722 100644
--- a/llvm/test/Transforms/SROA/dead-inst.ll
+++ b/llvm/test/Transforms/SROA/dead-inst.ll
@@ -47,7 +47,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
; CHECK-NEXT: [[TMP21:%.*]] = phi i64 [ -1, [[TMP12]] ], [ [[TMP20]], [[TMP17]] ]
; CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 0 to ptr
; CHECK-NEXT: [[TMP23:%.*]] = sub nsw i64 [[TMP21]], [[TMP13]]
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr nonnull [[TMP3]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP3]])
; CHECK-NEXT: [[TMP24:%.*]] = icmp ult i64 [[TMP23]], 2
; CHECK-NEXT: br i1 [[TMP24]], label [[G_EXIT:%.*]], label [[TMP25:%.*]]
; CHECK: 25:
@@ -60,7 +60,7 @@ define void @H(ptr noalias nocapture readnone, [2 x i64], ptr %ptr, i32 signext
; CHECK-NEXT: call void @D(ptr nonnull sret([[CLASS_B]]) [[TMP3]], ptr nonnull dereferenceable(32) [[PTR2:%.*]])
; CHECK-NEXT: br label [[G_EXIT]]
; CHECK: G.exit:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr nonnull [[TMP3]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TMP3]])
; CHECK-NEXT: br label [[FOO]]
; CHECK: foo:
; CHECK-NEXT: ret void
@@ -105,7 +105,7 @@ a.exit:
%22 = phi i64 [ -1, %12 ], [ %21, %18 ]
%23 = load ptr, ptr %13, align 8
%24 = sub nsw i64 %22, %14
- call void @llvm.lifetime.start.p0(i64 32, ptr nonnull %3)
+ call void @llvm.lifetime.start.p0(ptr nonnull %3)
%25 = icmp ult i64 %24, 2
br i1 %25, label %G.exit, label %26
@@ -122,7 +122,7 @@ a.exit:
br label %G.exit
G.exit:
- call void @llvm.lifetime.end.p0(i64 32, ptr nonnull %3)
+ call void @llvm.lifetime.end.p0(ptr nonnull %3)
br label %foo
foo:
@@ -133,10 +133,10 @@ foo:
declare ptr @memchr(ptr, i32 signext, i64) local_unnamed_addr
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SROA/ignore-droppable.ll b/llvm/test/Transforms/SROA/ignore-droppable.ll
index 9c95dc0..ba581bb 100644
--- a/llvm/test/Transforms/SROA/ignore-droppable.ll
+++ b/llvm/test/Transforms/SROA/ignore-droppable.ll
@@ -3,8 +3,8 @@
; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG
declare void @llvm.assume(i1)
-declare void @llvm.lifetime.start.p0(i64 %size, ptr nocapture %ptr)
-declare void @llvm.lifetime.end.p0(i64 %size, ptr nocapture %ptr)
+declare void @llvm.lifetime.start.p0(ptr nocapture %ptr)
+declare void @llvm.lifetime.end.p0(ptr nocapture %ptr)
define void @positive_assume_uses(ptr %arg) {
; CHECK-LABEL: @positive_assume_uses(
@@ -55,10 +55,10 @@ define void @positive_gep_assume_uses() {
;
%A = alloca {i8, i16}
%B = getelementptr {i8, i16}, ptr %A, i32 0, i32 0
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["align"(ptr %B, i64 8), "align"(ptr %B, i64 16)]
store {i8, i16} zeroinitializer, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %B), "align"(ptr %B, i64 2)]
ret void
}
@@ -71,10 +71,10 @@ define void @positive_mixed_assume_uses() {
; CHECK-NEXT: ret void
;
%A = alloca i8
- call void @llvm.lifetime.start.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.start.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 8), "align"(ptr %A, i64 16)]
store i8 1, ptr %A
- call void @llvm.lifetime.end.p0(i64 2, ptr %A)
+ call void @llvm.lifetime.end.p0(ptr %A)
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
call void @llvm.assume(i1 true) ["nonnull"(ptr %A), "align"(ptr %A, i64 2), "nonnull"(ptr %A)]
ret void
diff --git a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
index b9e8873..668903d 100644
--- a/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
+++ b/llvm/test/Transforms/SROA/lifetime-intrinsic.ll
@@ -18,14 +18,14 @@ define i16 @with_lifetime(i32 %a, i32 %b) #0 {
; CHECK-NEXT: ret i16 [[RET]]
;
%arr = alloca %i32x2, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %arr)
+ call void @llvm.lifetime.start.p0(ptr %arr)
%p1 = getelementptr inbounds %i32x2, ptr %arr, i64 0, i32 0, i32 1
store i32 %a, ptr %arr, align 4
store i32 %b, ptr %p1, align 4
%s0 = load i16, ptr %arr, align 4
%s2 = load i16, ptr %p1, align 4
%ret = add i16 %s0, %s2
- call void @llvm.lifetime.end.p0(i64 8, ptr %arr)
+ call void @llvm.lifetime.end.p0(ptr %arr)
ret i16 %ret
}
@@ -50,9 +50,9 @@ define i16 @no_lifetime(i32 %a, i32 %b) #0 {
ret i16 %ret
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
attributes #0 = { alwaysinline nounwind }
attributes #1 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
index 13808b2..b86f41b 100644
--- a/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
+++ b/llvm/test/Transforms/SROA/non-capturing-call-readonly.ll
@@ -791,13 +791,13 @@ entry:
ret i32 0
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
define i64 @do_schedule_instrs_for_dce_after_fixups() {
; CHECK-LABEL: @do_schedule_instrs_for_dce_after_fixups(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[C:%.*]] = alloca i64, align 2
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[C]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[C]])
; CHECK-NEXT: store i64 0, ptr [[C]], align 4
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.end:
@@ -807,7 +807,7 @@ define i64 @do_schedule_instrs_for_dce_after_fixups() {
;
entry:
%c = alloca i64, align 2
- call void @llvm.lifetime.start.p0(i64 1, ptr %c)
+ call void @llvm.lifetime.start.p0(ptr %c)
store i64 0, ptr %c
br label %if.end
diff --git a/llvm/test/Transforms/SROA/pr26972.ll b/llvm/test/Transforms/SROA/pr26972.ll
index a2872c7..526db3c 100644
--- a/llvm/test/Transforms/SROA/pr26972.ll
+++ b/llvm/test/Transforms/SROA/pr26972.ll
@@ -12,11 +12,11 @@ define void @fn1() {
; CHECK-NEXT: ret void
;
%a = alloca [1073741825 x i32], align 16
- call void @llvm.lifetime.end.p0(i64 4294967300, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SROA/readonlynocapture.ll b/llvm/test/Transforms/SROA/readonlynocapture.ll
index 5752fadd..b6f7b1f 100644
--- a/llvm/test/Transforms/SROA/readonlynocapture.ll
+++ b/llvm/test/Transforms/SROA/readonlynocapture.ll
@@ -284,25 +284,25 @@ define void @incompletestruct(i1 %b, i1 %c) {
; CHECK-LABEL: @incompletestruct(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]])
; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144
; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072
; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]]
; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4
; CHECK-NEXT: call void @callee(ptr [[LII]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]])
; CHECK-NEXT: ret void
;
entry:
%LII = alloca %struct.LoadImmediateInfo, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.start.p0(ptr nonnull %LII)
%bf.load = load i32, ptr %LII, align 4
%bf.clear4 = and i32 %bf.load, -262144
%bf.set5 = select i1 %b, i32 196608, i32 131072
%bf.set12 = or disjoint i32 %bf.set5, %bf.clear4
store i32 %bf.set12, ptr %LII, align 4
call void @callee(ptr %LII)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.end.p0(ptr nonnull %LII)
ret void
}
@@ -312,13 +312,13 @@ define void @incompletestruct_bb(i1 %b, i1 %c) {
; CHECK-NEXT: [[LII:%.*]] = alloca [[STRUCT_LOADIMMEDIATEINFO:%.*]], align 4
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[LII]])
; CHECK-NEXT: [[BF_CLEAR4:%.*]] = and i32 undef, -262144
; CHECK-NEXT: [[BF_SET5:%.*]] = select i1 [[B:%.*]], i32 196608, i32 131072
; CHECK-NEXT: [[BF_SET12:%.*]] = or disjoint i32 [[BF_SET5]], [[BF_CLEAR4]]
; CHECK-NEXT: store i32 [[BF_SET12]], ptr [[LII]], align 4
; CHECK-NEXT: call void @callee(ptr [[LII]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[LII]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[LII]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret void
@@ -328,14 +328,14 @@ entry:
br i1 %c, label %if.then, label %if.end
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.start.p0(ptr nonnull %LII)
%bf.load = load i32, ptr %LII, align 4
%bf.clear4 = and i32 %bf.load, -262144
%bf.set5 = select i1 %b, i32 196608, i32 131072
%bf.set12 = or disjoint i32 %bf.set5, %bf.clear4
store i32 %bf.set12, ptr %LII, align 4
call void @callee(ptr %LII)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %LII)
+ call void @llvm.lifetime.end.p0(ptr nonnull %LII)
br label %if.end
if.end: ; preds = %if.then, %entry
@@ -459,35 +459,35 @@ define i32 @provenance_only_capture() {
define i32 @simple_with_lifetimes() {
; CHECK-LABEL: @simple_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: call void @callee(ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 0
;
%a = alloca i32
- call void @llvm.lifetime.start(i64 4, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
store i32 0, ptr %a
call void @callee(ptr %a)
%l1 = load i32, ptr %a
- call void @llvm.lifetime.end(i64 4, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
ret i32 %l1
}
define i32 @twoalloc_with_lifetimes() {
; CHECK-LABEL: @twoalloc_with_lifetimes(
; CHECK-NEXT: [[A:%.*]] = alloca { i32, i32 }, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
; CHECK-NEXT: store i32 0, ptr [[A]], align 4
; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[A]], i32 1
; CHECK-NEXT: store i32 1, ptr [[B]], align 4
; CHECK-NEXT: call void @callee(ptr [[A]])
; CHECK-NEXT: [[R:%.*]] = add i32 0, 1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret i32 [[R]]
;
%a = alloca {i32, i32}
- call void @llvm.lifetime.start(i64 8, ptr %a)
+ call void @llvm.lifetime.start(ptr %a)
store i32 0, ptr %a
%b = getelementptr i32, ptr %a, i32 1
store i32 1, ptr %b
@@ -495,7 +495,7 @@ define i32 @twoalloc_with_lifetimes() {
%l1 = load i32, ptr %a
%l2 = load i32, ptr %b
%r = add i32 %l1, %l2
- call void @llvm.lifetime.end(i64 8, ptr %a)
+ call void @llvm.lifetime.end(ptr %a)
ret i32 %r
}
diff --git a/llvm/test/Transforms/SROA/select-load.ll b/llvm/test/Transforms/SROA/select-load.ll
index 9de7650..359ecaa 100644
--- a/llvm/test/Transforms/SROA/select-load.ll
+++ b/llvm/test/Transforms/SROA/select-load.ll
@@ -118,7 +118,7 @@ define i32 @interfering_lifetime(ptr %data, i64 %indvars.iv) {
%min = alloca i32, align 4
%arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv
%i1 = load i32, ptr %arrayidx, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
store i32 0, ptr %min, align 4
%cmp.i.i = icmp slt i32 %i1, 0
%__b.__a.i.i = select i1 %cmp.i.i, ptr %min, ptr %arrayidx
@@ -132,9 +132,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) {
; CHECK-PRESERVE-CFG-NEXT: [[MIN:%.*]] = alloca i32, align 4
; CHECK-PRESERVE-CFG-NEXT: [[MAX:%.*]] = alloca i32, align 4
; CHECK-PRESERVE-CFG-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 [[INDVARS_IV:%.*]]
-; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MIN]])
+; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MIN]])
; CHECK-PRESERVE-CFG-NEXT: store i32 0, ptr [[MIN]], align 4
-; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[MAX]])
+; CHECK-PRESERVE-CFG-NEXT: call void @llvm.lifetime.start.p0(ptr [[MAX]])
; CHECK-PRESERVE-CFG-NEXT: store i32 4095, ptr [[MAX]], align 4
; CHECK-PRESERVE-CFG-NEXT: [[I1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-PRESERVE-CFG-NEXT: [[CMP_I_I:%.*]] = icmp slt i32 [[I1]], 0
@@ -167,9 +167,9 @@ define i32 @clamp_load_to_constant_range(ptr %data, i64 %indvars.iv) {
%min = alloca i32, align 4
%max = alloca i32, align 4
%arrayidx = getelementptr inbounds i32, ptr %data, i64 %indvars.iv
- call void @llvm.lifetime.start.p0(i64 4, ptr %min)
+ call void @llvm.lifetime.start.p0(ptr %min)
store i32 0, ptr %min, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %max)
+ call void @llvm.lifetime.start.p0(ptr %max)
store i32 4095, ptr %max, align 4
%i1 = load i32, ptr %arrayidx, align 4
%cmp.i.i = icmp slt i32 %i1, 0
@@ -482,6 +482,6 @@ define void @load_of_select_with_noundef_nonnull(ptr %buffer, i1 %b) {
; Ensure that the branch metadata is reversed to match the reversals above.
-declare void @llvm.lifetime.start.p0(i64, ptr )
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr )
+declare void @llvm.lifetime.end.p0(ptr)
declare i32 @llvm.smax.i32(i32, i32)
diff --git a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
index 561315b..60228a4 100644
--- a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
+++ b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
@@ -5,10 +5,10 @@
target datalayout = "e-p:64:32-i64:32-v32:32-n32-S64"
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.start.p0(ptr nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
+declare void @llvm.lifetime.end.p0(ptr nocapture) #0
define void @wombat(<4 x float> %arg1) {
; CHECK-LABEL: @wombat(
@@ -19,10 +19,10 @@ define void @wombat(<4 x float> %arg1) {
;
bb:
%tmp = alloca <4 x float>, align 16
- call void @llvm.lifetime.start.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.start.p0(ptr %tmp)
store <4 x float> %arg1, ptr %tmp, align 16
%tmp18 = load <3 x float>, ptr %tmp
- call void @llvm.lifetime.end.p0(i64 16, ptr %tmp)
+ call void @llvm.lifetime.end.p0(ptr %tmp)
call void @wombat3(<3 x float> %tmp18)
ret void
}
diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll
index ffa758e..682e8e3 100644
--- a/llvm/test/Transforms/SROA/vector-promotion.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion.ll
@@ -1534,7 +1534,7 @@ bb.5:
}
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.end.p0(ptr)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}
diff --git a/llvm/test/Transforms/SafeStack/ARM/debug.ll b/llvm/test/Transforms/SafeStack/ARM/debug.ll
index a8c534c..207475a 100644
--- a/llvm/test/Transforms/SafeStack/ARM/debug.ll
+++ b/llvm/test/Transforms/SafeStack/ARM/debug.ll
@@ -29,15 +29,15 @@ entry:
define void @f() local_unnamed_addr #1 !dbg !27 {
entry:
%c = alloca [16 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 16, ptr nonnull %c) #5, !dbg !35
+ call void @llvm.lifetime.start.p0(ptr nonnull %c) #5, !dbg !35
call void @llvm.dbg.declare(metadata ptr %c, metadata !31, metadata !DIExpression()), !dbg !36
call void @Capture(ptr nonnull %c) #5, !dbg !37
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %c) #5, !dbg !38
+ call void @llvm.lifetime.end.p0(ptr nonnull %c) #5, !dbg !38
ret void, !dbg !38
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: nounwind readnone speculatable
declare void @llvm.dbg.declare(metadata, metadata, metadata) #3
@@ -45,7 +45,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #3
declare void @Capture(ptr) local_unnamed_addr #4
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { norecurse nounwind readonly safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind safestack "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/SafeStack/X86/call.ll b/llvm/test/Transforms/SafeStack/X86/call.ll
index 9592b33..f14e676 100644
--- a/llvm/test/Transforms/SafeStack/X86/call.ll
+++ b/llvm/test/Transforms/SafeStack/X86/call.ll
@@ -152,8 +152,8 @@ define void @call_lifetime(ptr %p) {
; CHECK: ret void
entry:
%q = alloca [100 x i8], align 16
- call void @llvm.lifetime.start.p0(i64 100, ptr %q)
- call void @llvm.lifetime.end.p0(i64 100, ptr %q)
+ call void @llvm.lifetime.start.p0(ptr %q)
+ call void @llvm.lifetime.end.p0(ptr %q)
ret void
}
@@ -167,5 +167,5 @@ declare void @readnone0(ptr nocapture readnone, ptr nocapture)
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind argmemonly
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
index 8ff369e..5192e47 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring-ssp.ll
@@ -14,19 +14,19 @@ entry:
%x = alloca i64, align 8
%y = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture64(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture64(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring.ll b/llvm/test/Transforms/SafeStack/X86/coloring.ll
index 22e1487..288ae00 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring.ll
@@ -11,30 +11,30 @@ entry:
%x = alloca i32, align 4
%x1 = alloca i32, align 4
%x2 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: %[[A1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[A1]])
call void @capture(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x)
- call void @llvm.lifetime.start.p0(i64 4, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x1)
; CHECK: %[[B1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[B1]])
call void @capture(ptr nonnull %x1)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 4, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK: %[[C1:.*]] = getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture(ptr nonnull %[[C1]])
call void @capture(ptr nonnull %x2)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x2)
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/coloring2.ll b/llvm/test/Transforms/SafeStack/X86/coloring2.ll
index ae5f375..a4157cb 100644
--- a/llvm/test/Transforms/SafeStack/X86/coloring2.ll
+++ b/llvm/test/Transforms/SafeStack/X86/coloring2.ll
@@ -14,21 +14,21 @@ entry:
%y = alloca i32, align 4
%z = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -42,11 +42,11 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %y)
@@ -65,21 +65,21 @@ entry:
%y = alloca i32, align 4
%z = alloca i64, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -95,9 +95,9 @@ entry:
%z = alloca i64, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -16
call void @capture32(ptr %x)
@@ -108,9 +108,9 @@ entry:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
call void @capture64(ptr %z)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %z)
ret void
}
@@ -147,8 +147,8 @@ entry:
%z = alloca i64, align 8
%z1 = alloca i64, align 8
%z2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %x1)
@@ -158,62 +158,62 @@ entry:
br i1 %a, label %if.then, label %if.else4
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y)
br i1 %b, label %if.then3, label %if.else
if.then3: ; preds = %if.then
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.start.p0(ptr %y1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y1)
+ call void @llvm.lifetime.end.p0(ptr %y1)
br label %if.end
if.else: ; preds = %if.then
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.start.p0(ptr %y2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %y2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y2)
+ call void @llvm.lifetime.end.p0(ptr %y2)
br label %if.end
if.end: ; preds = %if.else, %if.then3
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %if.end9
if.else4: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.start.p0(ptr %z)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -24
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z)
br i1 %b, label %if.then6, label %if.else7
if.then6: ; preds = %if.else4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.start.p0(ptr %z1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z1)
+ call void @llvm.lifetime.end.p0(ptr %z1)
br label %if.end8
if.else7: ; preds = %if.else4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.start.p0(ptr %z2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -32
; CHECK: call void @capture64(
call void @capture64(ptr nonnull %z2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z2)
+ call void @llvm.lifetime.end.p0(ptr %z2)
br label %if.end8
if.end8: ; preds = %if.else7, %if.then6
- call void @llvm.lifetime.end.p0(i64 -1, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
br label %if.end9
if.end9: ; preds = %if.end8, %if.end
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
ret void
}
@@ -225,21 +225,21 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
bb3:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
ret void
}
@@ -250,18 +250,18 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
ret void
bb3:
ret void
@@ -275,14 +275,14 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -299,14 +299,14 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i32, align 4
%y = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %x)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -326,10 +326,10 @@ entry:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
; CHECK: call void @capture32(
call void @capture32(ptr %x)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %x)
br i1 %d, label %bb2, label %bb3
bb2:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %y)
+ call void @llvm.lifetime.start.p0(ptr %y)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
; CHECK: call void @capture32(
call void @capture32(ptr %y)
@@ -347,26 +347,26 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i)
+ call void @llvm.lifetime.start.p0(ptr %B.i)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %A.i)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %A.i)
+ call void @llvm.lifetime.end.p0(ptr %B.i)
+ call void @llvm.lifetime.start.p0(ptr %A.i1)
+ call void @llvm.lifetime.start.p0(ptr %B.i2)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -400
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %A.i1)
; CHECK: getelementptr i8, ptr %[[USP]], i32 -800
; CHECK: call void @capture100x32(
call void @capture100x32(ptr %B.i2)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i2)
+ call void @llvm.lifetime.end.p0(ptr %A.i1)
+ call void @llvm.lifetime.end.p0(ptr %B.i2)
ret void
}
@@ -378,11 +378,11 @@ entry:
%buf1 = alloca i8, i32 100000, align 16
%buf2 = alloca i8, i32 100000, align 16
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+ call void @llvm.lifetime.end.p0(ptr %buf1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf1)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %buf2)
+ call void @llvm.lifetime.start.p0(ptr %buf1)
+ call void @llvm.lifetime.start.p0(ptr %buf2)
call void @capture8(ptr %buf1)
call void @capture8(ptr %buf2)
ret void
@@ -404,12 +404,12 @@ entry:
%B.i2 = alloca [100 x i32], align 4
%A.i = alloca [100 x i32], align 4
%B.i = alloca [100 x i32], align 4
- call void @llvm.lifetime.start.p0(i64 -1, ptr %A.i) nounwind
- call void @llvm.lifetime.start.p0(i64 -1, ptr %B.i) nounwind
+ call void @llvm.lifetime.start.p0(ptr %A.i) nounwind
+ call void @llvm.lifetime.start.p0(ptr %B.i) nounwind
call void @capture100x32(ptr %A.i)
call void @capture100x32(ptr %B.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %A.i) nounwind
- call void @llvm.lifetime.end.p0(i64 -1, ptr %B.i) nounwind
+ call void @llvm.lifetime.end.p0(ptr %A.i) nounwind
+ call void @llvm.lifetime.end.p0(ptr %B.i) nounwind
br label %block2
block2:
@@ -429,13 +429,13 @@ entry:
%a.i = alloca [4 x %struct.Klass], align 16
%b.i = alloca [4 x %struct.Klass], align 16
; I am used outside the lifetime zone below:
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a.i)
- call void @llvm.lifetime.start.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.start.p0(ptr %a.i)
+ call void @llvm.lifetime.start.p0(ptr %b.i)
call void @capture8(ptr %a.i)
call void @capture8(ptr %b.i)
%z3 = load i32, ptr %a.i, align 16
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a.i)
- call void @llvm.lifetime.end.p0(i64 -1, ptr %b.i)
+ call void @llvm.lifetime.end.p0(ptr %a.i)
+ call void @llvm.lifetime.end.p0(ptr %b.i)
ret i32 %z3
}
@@ -445,12 +445,12 @@ entry:
; CHECK: %[[USP:.*]] = load ptr, ptr @__safestack_unsafe_stack_ptr
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
br label %l2
l2:
call void @capture8(ptr %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.end.p0(ptr %x) nounwind
br label %l2
}
@@ -463,25 +463,25 @@ entry:
; CHECK-NEXT: getelementptr i8, ptr %[[USP]], i32 -16
%x = alloca i8, align 4
%y = alloca i8, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
br label %l2
l2:
; CHECK: getelementptr i8, ptr %[[USP]], i32 -8
- call void @llvm.lifetime.start.p0(i64 4, ptr %y) nounwind
+ call void @llvm.lifetime.start.p0(ptr %y) nounwind
call void @capture8(ptr %y)
- call void @llvm.lifetime.end.p0(i64 4, ptr %y) nounwind
+ call void @llvm.lifetime.end.p0(ptr %y) nounwind
; CHECK: getelementptr i8, ptr %[[USP]], i32 -4
- call void @llvm.lifetime.start.p0(i64 4, ptr %x) nounwind
+ call void @llvm.lifetime.start.p0(ptr %x) nounwind
call void @capture8(ptr %x)
br label %l2
}
attributes #0 = { safestack }
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture8(ptr)
declare void @capture32(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
index 7a1fdc0..e60522f 100644
--- a/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
+++ b/llvm/test/Transforms/SafeStack/X86/debug-loc2.ll
@@ -43,12 +43,12 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @capture(ptr) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, metadata, metadata) #3
diff --git a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
index b858fd6..8a5362b 100644
--- a/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
+++ b/llvm/test/Transforms/SafeStack/X86/layout-frag.ll
@@ -13,16 +13,16 @@ entry:
%x2 = alloca i64, align 8
- call void @llvm.lifetime.start.p0(i64 8, ptr %x0)
+ call void @llvm.lifetime.start.p0(ptr %x0)
call void @capture64(ptr %x0)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x0)
+ call void @llvm.lifetime.end.p0(ptr %x0)
- call void @llvm.lifetime.start.p0(i64 1, ptr %x1)
- call void @llvm.lifetime.start.p0(i64 8, ptr %x2)
+ call void @llvm.lifetime.start.p0(ptr %x1)
+ call void @llvm.lifetime.start.p0(ptr %x2)
call void @capture8(ptr %x1)
call void @capture64(ptr %x2)
- call void @llvm.lifetime.end.p0(i64 1, ptr %x1)
- call void @llvm.lifetime.end.p0(i64 8, ptr %x2)
+ call void @llvm.lifetime.end.p0(ptr %x1)
+ call void @llvm.lifetime.end.p0(ptr %x2)
; Test that i64 allocas share space.
; CHECK: getelementptr i8, ptr %unsafe_stack_ptr, i32 -8
@@ -32,7 +32,7 @@ entry:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare void @capture8(ptr)
declare void @capture64(ptr)
diff --git a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
index 76c638e..c01ca2f 100644
--- a/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
+++ b/llvm/test/Transforms/SafeStack/X86/no-crash-on-lifetime.ll
@@ -9,9 +9,9 @@ define dso_local void @_ZN1s1tE1F(ptr byval(%class.F) %g) local_unnamed_addr saf
entry:
%ref.tmp.i.i.i = alloca i64, align 1
call void undef(ptr %g)
- call void @llvm.lifetime.start.p0(i64 3, ptr %ref.tmp.i.i.i)
+ call void @llvm.lifetime.start.p0(ptr %ref.tmp.i.i.i)
ret void
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
diff --git a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
index ba66548..3ae0aea 100644
--- a/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
+++ b/llvm/test/Transforms/SampleProfile/csspgo-import-list-callee-samples.ll
@@ -61,10 +61,10 @@ entry:
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: nounwind uwtable
define dso_local i32 @main() #0 !dbg !28 {
diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
index 5fe80db..c7617c1 100644
--- a/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
+++ b/llvm/test/Transforms/SampleProfile/entry_counts_cold.ll
@@ -43,7 +43,7 @@ entry:
%a = alloca i32, align 4
store ptr %p, ptr %p.addr, align 8, !tbaa !15
call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35
- call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36
+ call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36
call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37
%0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15
%arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38
@@ -58,7 +58,7 @@ entry:
store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25
%5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25
%add2 = add nsw i32 %5, 1, !dbg !45
- call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46
+ call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46
ret i32 %add2, !dbg !47
}
@@ -86,10 +86,10 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @baz(...) #3
diff --git a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
index b8e1064..0e62921 100644
--- a/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
+++ b/llvm/test/Transforms/SampleProfile/entry_counts_missing_dbginfo.ll
@@ -53,7 +53,7 @@ entry:
%a = alloca i32, align 4
store ptr %p, ptr %p.addr, align 8, !tbaa !15
call void @llvm.dbg.declare(metadata ptr %p.addr, metadata !33, metadata !DIExpression()), !dbg !35
- call void @llvm.lifetime.start.p0(i64 4, ptr %a) #4, !dbg !36
+ call void @llvm.lifetime.start.p0(ptr %a) #4, !dbg !36
call void @llvm.dbg.declare(metadata ptr %a, metadata !34, metadata !DIExpression()), !dbg !37
%0 = load ptr, ptr %p.addr, align 8, !dbg !38, !tbaa !15
%arrayidx = getelementptr inbounds i32, ptr %0, i64 3, !dbg !38
@@ -68,7 +68,7 @@ entry:
store i32 %call, ptr %a, align 4, !dbg !43, !tbaa !25
%5 = load i32, ptr %a, align 4, !dbg !44, !tbaa !25
%add2 = add nsw i32 %5, 1, !dbg !45
- call void @llvm.lifetime.end.p0(i64 4, ptr %a) #4, !dbg !46
+ call void @llvm.lifetime.end.p0(ptr %a) #4, !dbg !46
ret i32 %add2, !dbg !47
}
@@ -96,10 +96,10 @@ entry:
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare void @baz(...) #3
diff --git a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
index 3ca94a4..2b091a1 100644
--- a/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/non-probe-stale-profile-matching.ll
@@ -151,10 +151,10 @@ for.end: ; preds = %cleanup, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
attributes #0 = { noinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
attributes #1 = { alwaysinline nounwind uwtable "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "use-sample-profile" }
diff --git a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
index 42bc1b8..0a1b896 100644
--- a/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
+++ b/llvm/test/Transforms/SampleProfile/profile-mismatch.ll
@@ -43,13 +43,13 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !12 {
entry:
%y = alloca i32, align 4
call void @llvm.dbg.value(metadata i32 %x, metadata !16, metadata !DIExpression()), !dbg !18
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !19
+ call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !19
call void @llvm.dbg.declare(metadata ptr %y, metadata !17, metadata !DIExpression()), !dbg !20
%add = add nsw i32 %x, 1, !dbg !21
store volatile i32 %add, ptr %y, align 4, !dbg !20, !tbaa !22
%y.0. = load volatile i32, ptr %y, align 4, !dbg !26, !tbaa !22
%add1 = add nsw i32 %y.0., 1, !dbg !27
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !28
+ call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !28
ret i32 %add1, !dbg !29
}
@@ -57,10 +57,10 @@ entry:
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline nounwind uwtable
define dso_local i32 @bar(i32 noundef %x) #3 !dbg !30 {
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
index 6d4429b..26ae198 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-discriminator.ll
@@ -31,8 +31,8 @@ bb3:
}
declare void @_Z3barv() #1
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
index b662efa..383289e 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-icp-factor.ll
@@ -60,12 +60,12 @@ bb:
%i3 = alloca i32, align 4
store i32 0, ptr %i, align 4
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 1, i32 0, i64 -1), !dbg !62
- call void @llvm.lifetime.start.p0(i64 8, ptr %i1), !dbg !62
+ call void @llvm.lifetime.start.p0(ptr %i1), !dbg !62
call void @llvm.dbg.declare(metadata ptr %i1, metadata !57, metadata !DIExpression()), !dbg !63
- call void @llvm.lifetime.start.p0(i64 4, ptr %i2), !dbg !64
+ call void @llvm.lifetime.start.p0(ptr %i2), !dbg !64
call void @llvm.dbg.declare(metadata ptr %i2, metadata !59, metadata !DIExpression()), !dbg !65
store i32 0, ptr %i2, align 4, !dbg !65, !tbaa !19
- call void @llvm.lifetime.start.p0(i64 4, ptr %i3), !dbg !66
+ call void @llvm.lifetime.start.p0(ptr %i3), !dbg !66
call void @llvm.dbg.declare(metadata ptr %i3, metadata !60, metadata !DIExpression()), !dbg !67
store i32 0, ptr %i3, align 4, !dbg !67, !tbaa !19
br label %bb7, !dbg !66
@@ -78,7 +78,7 @@ bb7: ; preds = %bb25, %bb
bb10: ; preds = %bb7
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 3, i32 0, i64 -1), !dbg !72
- call void @llvm.lifetime.end.p0(i64 4, ptr %i3), !dbg !72
+ call void @llvm.lifetime.end.p0(ptr %i3), !dbg !72
br label %bb28
bb12: ; preds = %bb7
@@ -119,16 +119,16 @@ bb28: ; preds = %bb10
call void @llvm.pseudoprobe(i64 -2624081020897602054, i64 9, i32 0, i64 -1), !dbg !92
%i29 = load i32, ptr %i2, align 4, !dbg !92, !tbaa !19
%i30 = call i32 (ptr, ...) @printf(ptr @.str, i32 %i29), !dbg !93
- call void @llvm.lifetime.end.p0(i64 4, ptr %i2), !dbg !95
- call void @llvm.lifetime.end.p0(i64 8, ptr %i1), !dbg !95
+ call void @llvm.lifetime.end.p0(ptr %i2), !dbg !95
+ call void @llvm.lifetime.end.p0(ptr %i1), !dbg !95
ret i32 0, !dbg !96
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
declare dso_local i32 @printf(ptr, ...)
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
index 22317e6..e1d717c 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-profile-mismatch.ll
@@ -70,14 +70,14 @@ define dso_local i32 @foo(i32 noundef %x) #0 !dbg !16 {
entry:
%y = alloca i32, align 4
call void @llvm.dbg.value(metadata i32 %x, metadata !20, metadata !DIExpression()), !dbg !22
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %y), !dbg !23
+ call void @llvm.lifetime.start.p0(ptr nonnull %y), !dbg !23
call void @llvm.dbg.declare(metadata ptr %y, metadata !21, metadata !DIExpression()), !dbg !24
call void @llvm.pseudoprobe(i64 6699318081062747564, i64 1, i32 0, i64 -1), !dbg !25
%add = add nsw i32 %x, 1, !dbg !26
store volatile i32 %add, ptr %y, align 4, !dbg !24, !tbaa !27
%y.0. = load volatile i32, ptr %y, align 4, !dbg !31, !tbaa !27
%add1 = add nsw i32 %y.0., 1, !dbg !32
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %y), !dbg !33
+ call void @llvm.lifetime.end.p0(ptr nonnull %y), !dbg !33
ret i32 %add1, !dbg !34
}
@@ -85,10 +85,10 @@ entry:
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly mustprogress nocallback nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline nounwind uwtable
define dso_local i32 @bar(i32 noundef %x) #3 !dbg !35 {
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
index cdd365b..c0976de 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching-LCS.ll
@@ -119,10 +119,10 @@ if.end: ; preds = %if.else, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
index 20be0c2..0c38d9c 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-matching.ll
@@ -217,10 +217,10 @@ for.end: ; preds = %cleanup, %if.then
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture) #4
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(ptr nocapture) #4
; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare void @llvm.dbg.assign(metadata, metadata, metadata, metadata, metadata, metadata) #1
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
index 4e435f4..dbf3dda 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-name-similarity.ll
@@ -57,10 +57,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr captures(none)) #2
+declare void @llvm.lifetime.start.p0(ptr captures(none)) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr captures(none)) #2
+declare void @llvm.lifetime.end.p0(ptr captures(none)) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
index d9db804..e246d26 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming-recursive.ll
@@ -70,13 +70,13 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
index 6bf09ce..d1c5a9d 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-renaming.ll
@@ -175,10 +175,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.start.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #3
+declare void @llvm.lifetime.end.p0(ptr nocapture) #3
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #4
diff --git a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
index c839364f..2ed1872 100644
--- a/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
+++ b/llvm/test/Transforms/SampleProfile/pseudo-probe-stale-profile-toplev-func.ll
@@ -85,10 +85,10 @@ for.body: ; preds = %for.cond
}
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite)
declare void @llvm.pseudoprobe(i64, i64, i32, i64) #3
diff --git a/llvm/test/Transforms/SampleProfile/remarks.ll b/llvm/test/Transforms/SampleProfile/remarks.ll
index 9c0143a..3cb91b7 100644
--- a/llvm/test/Transforms/SampleProfile/remarks.ll
+++ b/llvm/test/Transforms/SampleProfile/remarks.ll
@@ -121,10 +121,10 @@ define i64 @_Z3foov() #0 !dbg !4 {
entry:
%sum = alloca i64, align 8
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 8, ptr %sum) #4, !dbg !19
+ call void @llvm.lifetime.start.p0(ptr %sum) #4, !dbg !19
call void @llvm.dbg.declare(metadata ptr %sum, metadata !9, metadata !20), !dbg !21
store i64 0, ptr %sum, align 8, !dbg !21, !tbaa !22
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #4, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i) #4, !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !10, metadata !20), !dbg !27
store i32 0, ptr %i, align 4, !dbg !27, !tbaa !28
br label %for.cond, !dbg !26
@@ -135,7 +135,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !35
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #4, !dbg !36
+ call void @llvm.lifetime.end.p0(ptr %i) #4, !dbg !36
br label %for.end
for.body: ; preds = %for.cond
@@ -173,12 +173,12 @@ for.inc: ; preds = %if.end
for.end: ; preds = %for.cond.cleanup
%7 = load i64, ptr %sum, align 8, !dbg !53, !tbaa !22
- call void @llvm.lifetime.end.p0(i64 8, ptr %sum) #4, !dbg !54
+ call void @llvm.lifetime.end.p0(ptr %sum) #4, !dbg !54
ret i64 %7, !dbg !55
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
@@ -189,7 +189,7 @@ define i32 @rand() #3 !dbg !59 {
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define i32 @main() #0 !dbg !13 {
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
index 58ca8df..99e908e 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/critedge-assume.ll
@@ -56,7 +56,7 @@ while.end:
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @_ZNK1F5beginEv(ptr)
@@ -68,7 +68,7 @@ declare noalias nonnull ptr @_Znwm(i64)
declare void @_ZN1B6appendEv(ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare i1 @llvm.type.test(ptr, metadata)
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
index 162a3ab..1499eecb 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/empty-cleanuppad.ll
@@ -437,7 +437,7 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 {
; CHECK-LABEL: @f9(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[S]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[S]])
; CHECK-NEXT: invoke void @"\01??1S2@@QEAA@XZ"(ptr [[S]])
; CHECK-NEXT: to label [[TRY_CONT:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
; CHECK: catch.dispatch:
@@ -450,13 +450,13 @@ define i32 @f9() personality ptr @__CxxFrameHandler3 {
;
entry:
%s = alloca i8, align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.start.p0(ptr nonnull %s)
invoke void @"\01??1S2@@QEAA@XZ"(ptr %s)
to label %try.cont unwind label %ehcleanup
ehcleanup:
%cleanup.pad = cleanuppad within none []
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %s)
+ call void @llvm.lifetime.end.p0(ptr nonnull %s)
cleanupret from %cleanup.pad unwind label %catch.dispatch
catch.dispatch:
@@ -534,7 +534,7 @@ invoke.cont2: ; preds = %invoke.cont
ehcleanup: ; preds = %invoke.cont, %entry
%0 = cleanuppad within none []
- call void @llvm.lifetime.end.p0(i64 16, ptr nonnull %x)
+ call void @llvm.lifetime.end.p0(ptr nonnull %x)
cleanupret from %0 unwind label %catch.dispatch
catch.dispatch: ; preds = %ehcleanup, %invoke.cont
@@ -556,8 +556,8 @@ declare void @use_x(i32 %x)
declare i32 @__CxxFrameHandler3(...)
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
;.
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
index a937d9c..ce58e936 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/invalidate-dom.ll
@@ -79,10 +79,10 @@ for.body: ; preds = %for.cond
declare i32 @c(...) #0
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { "use-soft-float"="false" }
attributes #1 = { "target-cpu"="x86-64" }
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
index 62351d7..6129e3b 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/sink-common-code.ll
@@ -1338,10 +1338,10 @@ define i32 @test_not_sink_lifetime_marker(i1 zeroext %flag, i32 %x) {
; CHECK-NEXT: [[Z:%.*]] = alloca i32, align 4
; CHECK-NEXT: br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Y]])
; CHECK-NEXT: br label [[IF_END:%.*]]
; CHECK: if.else:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[Z]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[Z]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: ret i32 1
@@ -1352,11 +1352,11 @@ entry:
br i1 %flag, label %if.then, label %if.else
if.then:
- call void @llvm.lifetime.end.p0(i64 4, ptr %y)
+ call void @llvm.lifetime.end.p0(ptr %y)
br label %if.end
if.else:
- call void @llvm.lifetime.end.p0(i64 4, ptr %z)
+ call void @llvm.lifetime.end.p0(ptr %z)
br label %if.end
if.end:
@@ -1468,8 +1468,8 @@ declare void @direct_callee()
declare void @direct_callee2()
declare void @direct_callee3()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @creating_too_many_phis(i1 %cond, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: @creating_too_many_phis(
diff --git a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
index a430399..307501d 100644
--- a/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
+++ b/llvm/test/Transforms/SimplifyCFG/common-code-hoisting.ll
@@ -39,8 +39,8 @@ declare void @f0()
declare void @f1()
declare void @f2()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
define void @_Z4loopi(i1 %cmp) {
; HOIST-LABEL: @_Z4loopi(
diff --git a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
index ea14b17..40e9a49 100644
--- a/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
+++ b/llvm/test/Transforms/SimplifyCFG/invoke_unwind_lifetime.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s
-declare void @llvm.lifetime.start.p0(i64, ptr)
-declare void @llvm.lifetime.end.p0(i64, ptr)
+declare void @llvm.lifetime.start.p0(ptr)
+declare void @llvm.lifetime.end.p0(ptr)
declare void @escape(ptr)
@@ -15,16 +15,16 @@ define void @caller(i1 %c) personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @caller(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I0:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I0]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I0]])
; CHECK-NEXT: call void @escape(ptr [[I0]])
; CHECK-NEXT: [[I2:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I2]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I2]])
; CHECK-NEXT: call void @escape(ptr [[I2]])
; CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I4]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I4]])
; CHECK-NEXT: call void @escape(ptr [[I4]])
; CHECK-NEXT: [[I6:%.*]] = alloca i32, align 4
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[I6]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[I6]])
; CHECK-NEXT: call void @escape(ptr [[I6]])
; CHECK-NEXT: br i1 [[C:%.*]], label [[V0:%.*]], label [[V1:%.*]]
; CHECK: v0:
@@ -36,19 +36,19 @@ define void @caller(i1 %c) personality ptr @__gxx_personality_v0 {
;
entry:
%i0 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i0)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i0)
call void @escape(ptr %i0)
%i2 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i2)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i2)
call void @escape(ptr %i2)
%i4 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i4)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i4)
call void @escape(ptr %i4)
%i6 = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i6)
+ call void @llvm.lifetime.start.p0(ptr nonnull %i6)
call void @escape(ptr %i6)
br i1 %c, label %v0, label %v1
@@ -66,14 +66,14 @@ invoke.cont:
lpad.v0:
%i8 = landingpad { ptr, i32 } cleanup
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i0)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i4)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i0)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i4)
br label %end
lpad.v1:
%i9 = landingpad { ptr, i32 } cleanup
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i2)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %i6)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i2)
+ call void @llvm.lifetime.end.p0(ptr nonnull %i6)
br label %end
end:
diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
index 0174eb1..88395a0 100644
--- a/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
+++ b/llvm/test/Transforms/SimplifyCFG/lifetime-landingpad.ll
@@ -5,32 +5,32 @@ define void @foo() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[A]]) #[[ATTR1:[0-9]+]]
; CHECK-NEXT: call void @bar()
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[A]]) #[[ATTR1]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[A]]) #[[ATTR1]]
; CHECK-NEXT: ret void
;
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr nonnull %a) nounwind
invoke void @bar() to label %invoke.cont unwind label %lpad
invoke.cont:
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind
ret void
lpad:
%b = landingpad { ptr, i32 }
cleanup
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr nonnull %a) nounwind
resume { ptr, i32 } %b
}
declare void @bar()
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
declare i32 @__gxx_personality_v0(...)
;.
diff --git a/llvm/test/Transforms/SimplifyCFG/lifetime.ll b/llvm/test/Transforms/SimplifyCFG/lifetime.ll
index d6bba2c..fac0b61 100644
--- a/llvm/test/Transforms/SimplifyCFG/lifetime.ll
+++ b/llvm/test/Transforms/SimplifyCFG/lifetime.ll
@@ -10,11 +10,11 @@
define void @foo(i1 %x) {
entry:
%a = alloca i8
- call void @llvm.lifetime.start.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.start.p0(ptr %a) nounwind
br i1 %x, label %bb0, label %bb1
bb0:
- call void @llvm.lifetime.end.p0(i64 -1, ptr %a) nounwind
+ call void @llvm.lifetime.end.p0(ptr %a) nounwind
br label %bb1
bb1:
@@ -24,6 +24,6 @@ bb1:
declare void @f()
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
+declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
index 55f1c01..19e1c73 100644
--- a/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
+++ b/llvm/test/Transforms/SimplifyCFG/pr50060-constantfold-loopid.ll
@@ -22,7 +22,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK: do.body:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @C, align 4, !tbaa [[TBAA2:![0-9]+]]
; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP0]], 1
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[J]]) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[J]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT: store i32 0, ptr [[J]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
@@ -30,11 +30,11 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], 3
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[J]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(ptr [[J]]) #[[ATTR2]]
; CHECK-NEXT: br label [[DO_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: store i32 undef, ptr [[I]], align 4
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]]
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: br label [[FOR_COND1:%.*]]
; CHECK: for.cond1:
@@ -43,7 +43,7 @@ define dso_local void @_Z6test01v() addrspace(1) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY4:%.*]], label [[FOR_COND_CLEANUP3:%.*]]
; CHECK: for.cond.cleanup3:
-; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]]
+; CHECK-NEXT: call addrspace(1) void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[J]], align 4, !tbaa [[TBAA2]]
; CHECK-NEXT: [[INC7:%.*]] = add nsw i32 [[TMP4]], 1
; CHECK-NEXT: store i32 [[INC7]], ptr [[J]], align 4, !tbaa [[TBAA2]]
@@ -64,7 +64,7 @@ entry:
do.body: ; preds = %do.cond, %entry
%0 = load i32, ptr @C, align 4, !tbaa !2
%inc = add nsw i32 %0, 1
- call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %j) #2
+ call addrspace(1) void @llvm.lifetime.start.p0(ptr %j) #2
store i32 0, ptr %j, align 4, !tbaa !2
br label %for.cond
@@ -74,12 +74,12 @@ for.cond: ; preds = %for.inc6, %do.body
br i1 %cmp, label %for.body, label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond
- call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %j) #2
+ call addrspace(1) void @llvm.lifetime.end.p0(ptr %j) #2
br label %for.end8
for.body: ; preds = %for.cond
store i32 undef, ptr %i, align 4
- call addrspace(1) void @llvm.lifetime.start.p0(i64 4, ptr %i) #2
+ call addrspace(1) void @llvm.lifetime.start.p0(ptr %i) #2
store i32 0, ptr %i, align 4, !tbaa !2
br label %for.cond1
@@ -90,7 +90,7 @@ for.cond1: ; preds = %for.inc, %for.body
br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
for.cond.cleanup3: ; preds = %for.cond1
- call addrspace(1) void @llvm.lifetime.end.p0(i64 4, ptr %i) #2
+ call addrspace(1) void @llvm.lifetime.end.p0(ptr %i) #2
br label %for.end
for.body4: ; preds = %for.cond1
@@ -124,10 +124,10 @@ do.end: ; preds = %do.cond
}
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) addrspace(1) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) addrspace(1) #1
; Function Attrs: argmemonly nofree nosync nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) addrspace(1) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) addrspace(1) #1
attributes #0 = { nounwind "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { argmemonly nofree nosync nounwind willreturn }
diff --git a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
index 87d6493..0014b91 100644
--- a/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
+++ b/llvm/test/Transforms/SimplifyCFG/sink-and-convert-switch.ll
@@ -8,16 +8,16 @@ define void @pr104567(i8 %x, ptr %f) {
; CHECK-SAME: i8 [[X:%.*]], ptr [[F:%.*]]) {
; CHECK-NEXT: [[START:.*:]]
; CHECK-NEXT: [[Y:%.*]] = alloca [1 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]])
; CHECK-NEXT: [[SWITCH_OFFSET:%.*]] = add nsw i8 [[X]], 4
; CHECK-NEXT: store i8 [[SWITCH_OFFSET]], ptr [[Y]], align 1
; CHECK-NEXT: call void [[F]](ptr [[Y]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]])
; CHECK-NEXT: ret void
;
start:
%y = alloca [1 x i8], align 1
- call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %y)
+ call void @llvm.lifetime.start.p0(ptr nonnull %y)
switch i8 %x, label %default.unreachable [
i8 0, label %bb4
i8 1, label %bb3
@@ -41,7 +41,7 @@ bb2:
bb5:
call void %f(ptr %y)
- call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %y)
+ call void @llvm.lifetime.end.p0(ptr nonnull %y)
ret void
}
diff --git a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
index 10e4870..77ce730 100644
--- a/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
+++ b/llvm/test/Transforms/SimplifyCFG/tail-merge-noreturn.ll
@@ -315,8 +315,8 @@ cont3:
; from sharing stack slots for x and y.
declare void @escape_i32_ptr(ptr)
-declare void @llvm.lifetime.start(i64, ptr nocapture)
-declare void @llvm.lifetime.end(i64, ptr nocapture)
+declare void @llvm.lifetime.start(ptr nocapture)
+declare void @llvm.lifetime.end(ptr nocapture)
define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-LABEL: @dont_merge_lifetimes(
@@ -328,7 +328,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: i32 42, label [[IF_THEN3:%.*]]
; CHECK-NEXT: ]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[X]])
; CHECK-NEXT: store i32 0, ptr [[X]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[C2:%.*]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN1:%.*]]
@@ -336,11 +336,11 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[X]])
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[X]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[X]])
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: if.then3:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[Y]])
; CHECK-NEXT: store i32 0, ptr [[Y]], align 4
; CHECK-NEXT: [[TOBOOL5:%.*]] = icmp eq i32 [[C2]], 0
; CHECK-NEXT: br i1 [[TOBOOL5]], label [[IF_END7:%.*]], label [[IF_THEN6:%.*]]
@@ -348,7 +348,7 @@ define void @dont_merge_lifetimes(i32 %c1, i32 %c2) {
; CHECK-NEXT: call void @escape_i32_ptr(ptr nonnull [[Y]])
; CHECK-NEXT: br label [[IF_END7]]
; CHECK: if.end7:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[Y]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[Y]])
; CHECK-NEXT: call void @abort()
; CHECK-NEXT: unreachable
; CHECK: if.end9:
@@ -363,7 +363,7 @@ entry:
]
if.then: ; preds = %entry
- call void @llvm.lifetime.start(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.start(ptr nonnull %x)
store i32 0, ptr %x, align 4
%tobool = icmp eq i32 %c2, 0
br i1 %tobool, label %if.end, label %if.then1
@@ -373,12 +373,12 @@ if.then1: ; preds = %if.then
br label %if.end
if.end: ; preds = %if.then1, %if.then
- call void @llvm.lifetime.end(i64 4, ptr nonnull %x)
+ call void @llvm.lifetime.end(ptr nonnull %x)
call void @abort()
unreachable
if.then3: ; preds = %entry
- call void @llvm.lifetime.start(i64 4, ptr nonnull %y)
+ call void @llvm.lifetime.start(ptr nonnull %y)
store i32 0, ptr %y, align 4
%tobool5 = icmp eq i32 %c2, 0
br i1 %tobool5, label %if.end7, label %if.then6
@@ -388,7 +388,7 @@ if.then6: ; preds = %if.then3
br label %if.end7
if.end7: ; preds = %if.then6, %if.then3
- call void @llvm.lifetime.end(i64 4, ptr nonnull %y)
+ call void @llvm.lifetime.end(ptr nonnull %y)
call void @abort()
unreachable
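tail-merge-noreturn.ll is one of the few tests in this diff that spells the intrinsics without the .p0 address-space suffix (@llvm.lifetime.start / @llvm.lifetime.end). The rewrite is identical either way: only the i64 operand disappears, and the unmangled spelling itself is untouched. A short sketch of a call site in that form (with a placeholder alloca %x):

  %x = alloca i32, align 4
  call void @llvm.lifetime.start(ptr nonnull %x) ; unmangled spelling, pointer-only form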
diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
index 325db79..fa771ad 100644
--- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll
@@ -44,17 +44,17 @@ define dso_local void @_Z7dostuff1AS_i(ptr nocapture byval(%struct.A) align 8 %a
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [[STRUCT_A]], ptr [[B]], i64 0, i32 0, i64 5
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str, i64 [[INC]], i64 [[TMP1]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP]], ptr nonnull align 8 dereferenceable(80) [[B]], i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 80, ptr nonnull [[AGG_TMP5]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP5]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) [[AGG_TMP5]], ptr nonnull align 8 dereferenceable(80) [[A]], i64 80, i1 false)
; CHECK-NEXT: [[ADD]] = add nsw i32 [[I_TR]], 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP1]], ptr align 8 [[AGG_TMP]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP52]], ptr align 8 [[AGG_TMP5]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[A]], ptr align 8 [[AGG_TMP1]], i64 80, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[B]], ptr align 8 [[AGG_TMP52]], i64 80, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 80, ptr nonnull [[AGG_TMP5]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP5]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret void
@@ -74,14 +74,14 @@ if.end: ; preds = %entry
%1 = load i64, ptr %arrayidx4, align 8
%call = call i32 (ptr, ...) @printf(ptr nonnull dereferenceable(1) @.str
, i64 %inc, i64 %1)
- call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp, ptr nonnull align 8 dereferenceable(80) %b, i64 80, i1 false)
- call void @llvm.lifetime.start.p0(i64 80, ptr nonnull %agg.tmp5)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp5)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(80) %agg.tmp5, ptr nonnull align 8 dereferenceable(80) %a, i64 80, i1 false)
%add = add nsw i32 %i, 1
call void @_Z7dostuff1AS_i(ptr nonnull byval(%struct.A) align 8 %agg.tmp, ptr nonnull byval(%struct.A) align 8 %agg.tmp5, i32 %add)
- call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp)
- call void @llvm.lifetime.end.p0(i64 80, ptr nonnull %agg.tmp5)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp5)
br label %return
return: ; preds = %entry, %if.end
@@ -95,10 +95,10 @@ declare dso_local noundef i32 @printf(ptr nocapture noundef readonly, ...) local
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: noinline norecurse nounwind optnone uwtable
define dso_local i32 @main() local_unnamed_addr #3 {
diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
index 256fb04..dedd081 100644
--- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll
@@ -44,14 +44,14 @@ define dso_local i32 @_Z3fooi1S(i32 %count, ptr nocapture readonly byval(%struct
; CHECK: if.end:
; CHECK-NEXT: [[ADD]] = add nsw i32 [[COUNT_TR]], 1
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], ptr nonnull align 8 dereferenceable(20) [[P1]], i64 20, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP14]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 20, ptr nonnull [[AGG_TMP_I]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP14]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[AGG_TMP_I]])
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP1]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) [[AGG_TMP_I]], ptr nonnull align 8 dereferenceable(20) [[AGG_TMP14]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[AGG_TMP_I1]], ptr align 8 [[AGG_TMP_I]], i64 20, i1 false)
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[P1]], ptr align 8 [[AGG_TMP_I1]], i64 20, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP14]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 20, ptr nonnull [[AGG_TMP_I]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP14]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[AGG_TMP_I]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret i32 [[CALL]]
@@ -72,13 +72,13 @@ if.then: ; preds = %entry
if.end: ; preds = %entry
%add = add nsw i32 %count, 1
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp1, ptr nonnull align 8 dereferenceable(20) %p1, i64 20, i1 false)
- call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp14)
- call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %agg.tmp.i)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp14)
+ call void @llvm.lifetime.start.p0(ptr nonnull %agg.tmp.i)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp14, ptr nonnull align 8 dereferenceable(20) %agg.tmp1, i64 20, i1 false)
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(20) %agg.tmp.i, ptr nonnull align 8 dereferenceable(20) %agg.tmp14, i64 20, i1 false)
%call.i = call i32 @_Z3fooi1S(i32 %add, ptr nonnull byval(%struct.S) align 8 %agg.tmp.i)
- call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp14)
- call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %agg.tmp.i)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp14)
+ call void @llvm.lifetime.end.p0(ptr nonnull %agg.tmp.i)
br label %return
return: ; preds = %if.end, %if.then
@@ -89,10 +89,10 @@ return: ; preds = %if.end, %if.then
declare dso_local i32 @_Z3zoo1S(ptr byval(%struct.S) align 8) local_unnamed_addr #1
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #2
diff --git a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
index 293deca..b77ae9c 100644
--- a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll
@@ -49,11 +49,11 @@ define dso_local void @_Z19test_multiple_exitsi(i32 %param) local_unnamed_addr #
; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[PARAM_TR]], 10
; CHECK-NEXT: br i1 [[TMP0]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
; CHECK: if.then:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]]) #1
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]]) #1
; CHECK-NEXT: call void @_Z11capture_argPi(ptr nonnull [[TEMP]])
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[PARAM_TR]], 1
; CHECK-NEXT: call void @_Z19test_multiple_exitsi(i32 [[ADD]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]]) #1
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]]) #1
; CHECK-NEXT: br label [[IF_END14:%.*]]
; CHECK: if.else:
; CHECK-NEXT: [[PARAM_OFF:%.*]] = add i32 [[PARAM_TR]], -10
@@ -80,11 +80,11 @@ entry:
br i1 %0, label %if.then, label %if.else
if.then: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #2
+ call void @llvm.lifetime.start.p0(ptr nonnull %temp) #2
call void @_Z11capture_argPi(ptr nonnull %temp)
%add = add nuw nsw i32 %param, 1
call void @_Z19test_multiple_exitsi(i32 %add)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #2
+ call void @llvm.lifetime.end.p0(ptr nonnull %temp) #2
br label %if.end14
if.else: ; preds = %entry
@@ -113,10 +113,10 @@ if.end14: ; preds = %if.then5, %if.then1
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind uwtable }
attributes #1 = { nounwind uwtable }
diff --git a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
index c9ac9a5d..2f1aded 100644
--- a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
+++ b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll
@@ -34,11 +34,11 @@ define dso_local void @_Z4testi(i32 %recurseCount) local_unnamed_addr #1 {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[RECURSECOUNT_TR]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr nonnull [[TEMP]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TEMP]])
; CHECK-NEXT: store i32 10, ptr [[TEMP]], align 4
; CHECK-NEXT: call void @_Z15globalIncrementPKi(ptr nonnull [[TEMP]])
; CHECK-NEXT: [[SUB]] = add nsw i32 [[RECURSECOUNT_TR]], -1
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr nonnull [[TEMP]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr nonnull [[TEMP]])
; CHECK-NEXT: br label [[TAILRECURSE]]
; CHECK: return:
; CHECK-NEXT: ret void
@@ -49,12 +49,12 @@ entry:
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %temp) #6
+ call void @llvm.lifetime.start.p0(ptr nonnull %temp) #6
store i32 10, ptr %temp, align 4
call void @_Z15globalIncrementPKi(ptr nonnull %temp)
%sub = add nsw i32 %recurseCount, -1
call void @_Z4testi(i32 %sub)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %temp) #6
+ call void @llvm.lifetime.end.p0(ptr nonnull %temp) #6
br label %return
return: ; preds = %entry, %if.end
@@ -62,10 +62,10 @@ return: ; preds = %entry, %if.end
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.start.p0(ptr nocapture) #2
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #2
+declare void @llvm.lifetime.end.p0(ptr nocapture) #2
attributes #0 = { nofree noinline norecurse nounwind uwtable }
attributes #1 = { nounwind uwtable }
diff --git a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
index 14bfbb1..36eaf6e 100644
--- a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
+++ b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll
@@ -61,8 +61,8 @@ define i32 @main() {
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/Util/dbg-call-bitcast.ll b/llvm/test/Transforms/Util/dbg-call-bitcast.ll
index d8d80ab..f0c579c 100644
--- a/llvm/test/Transforms/Util/dbg-call-bitcast.ll
+++ b/llvm/test/Transforms/Util/dbg-call-bitcast.ll
@@ -2,7 +2,7 @@
define dso_local void @_Z1fv() {
%1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19
; CHECK: %[[A:.*]] = alloca i32, align 4
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
@@ -11,13 +11,13 @@ define dso_local void @_Z1fv() {
; CHECK-NOT: #dbg_value
; CHECK: call void @_Z1gPv
call void @_Z1gPv(ptr nonnull %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret void, !dbg !21
}
define dso_local void @_Z2fv() {
%1 = alloca i32, align 4
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.start.p0(ptr nonnull %1)
call void @llvm.dbg.declare(metadata ptr %1, metadata !16, metadata !DIExpression()), !dbg !19
; CHECK: %[[A:.*]] = alloca i32, align 4
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
@@ -29,14 +29,14 @@ block2:
; CHECK: #dbg_value(ptr %[[A]], {{.*}}, !DIExpression(DW_OP_deref)
; CHECK: call void @_Z1gPv
call void @_Z1gPv(ptr nonnull %1)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %1)
+ call void @llvm.lifetime.end.p0(ptr nonnull %1)
ret void, !dbg !21
}
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare void @llvm.dbg.declare(metadata, metadata, metadata)
declare dso_local void @_Z1gPv(ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9, !10}
diff --git a/llvm/test/Verifier/intrinsic-immarg.ll b/llvm/test/Verifier/intrinsic-immarg.ll
index c1bb932..d5aef3d 100644
--- a/llvm/test/Verifier/intrinsic-immarg.ll
+++ b/llvm/test/Verifier/intrinsic-immarg.ll
@@ -163,26 +163,6 @@ define void @test_scatter_8i32(<8 x i32> %a1, <8 x ptr> %ptr, <8 x i1> %mask, i3
ret void
}
-declare void @llvm.lifetime.start.p0(i64, ptr)
-define void @test_lifetime_start(i64 %arg0) {
- ; CHECK: immarg operand has non-immediate parameter
- ; CHECK-NEXT: i64 %arg0
- ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr)
- %ptr = alloca i64
- call void @llvm.lifetime.start.p0(i64 %arg0, ptr %ptr)
- ret void
-}
-
-declare void @llvm.lifetime.end.p0(i64, ptr)
-define void @test_lifetime_end(i64 %arg0) {
- ; CHECK: immarg operand has non-immediate parameter
- ; CHECK-NEXT: i64 %arg0
- ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr)
- %ptr = alloca i64
- call void @llvm.lifetime.end.p0(i64 %arg0, ptr %ptr)
- ret void
-}
-
declare ptr @llvm.invariant.start.p0(i64, ptr)
define void @test_invariant_start(i64 %arg0, ptr %ptr) {
; CHECK: immarg operand has non-immediate parameter
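The deletions above follow from the signature change rather than from any behavioral one: once the i64 size operand is gone, the lifetime intrinsics have no immarg parameter left, so the two negative tests that fed them a non-immediate size no longer have anything to exercise and are removed outright instead of rewritten. llvm.invariant.start, in the context lines just above, still takes an i64 size that must be an immediate, which is why its test survives. A hedged sketch of the kind of call the verifier continues to reject (%n stands for any non-constant i64):

  %p = alloca i64
  ; invalid: the size operand is declared immarg but %n is not an immediate
  %inv = call ptr @llvm.invariant.start.p0(i64 %n, ptr %p)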
diff --git a/llvm/test/Verifier/opaque-ptr.ll b/llvm/test/Verifier/opaque-ptr.ll
index 10e43a4..3ac9044 100644
--- a/llvm/test/Verifier/opaque-ptr.ll
+++ b/llvm/test/Verifier/opaque-ptr.ll
@@ -40,13 +40,13 @@ define void @atomicrmw(ptr %a, i32 %i) {
define void @opaque_mangle() {
; CHECK-LABEL: @opaque_mangle(
; CHECK-NEXT: [[A:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[A]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[A]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[A]])
; CHECK-NEXT: ret void
;
%a = alloca i64
- call void @llvm.lifetime.start.p0(i64 8, ptr %a)
- call void @llvm.lifetime.end.p0(i64 8, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
@@ -65,10 +65,8 @@ define void @intrinsic_calls(ptr %a) {
ret void
}
-; CHECK: @llvm.lifetime.start.p0
-; CHECK: @llvm.lifetime.end.p0
-declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
-declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
+declare void @llvm.lifetime.start.p0(ptr nocapture)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
index 96ff2d7..ef60118 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll
@@ -14,7 +14,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -27,7 +27,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -49,10 +49,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -61,7 +61,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -74,7 +74,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
index 6504830..4bae52e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
index 7c1ea5e..12c6e4e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.expected
@@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -51,7 +51,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
index 94af952..d67a303 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.globals.expected
@@ -17,7 +17,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -28,7 +28,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -51,7 +51,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -64,7 +64,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -86,10 +86,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -100,7 +100,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -111,7 +111,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -133,7 +133,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -146,7 +146,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
index 6504830..4bae52e 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.noglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
index a656c4ae..fb3a76f 100644
--- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/various_ir_values_dbgrecords.ll.funcsig.transitiveglobals.expected
@@ -16,7 +16,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: #dbg_assign(i1 undef, [[META13:![0-9]+]], !DIExpression(), [[DIASSIGNID16]], ptr [[A_ADDR]], !DIExpression(), [[META17:![0-9]+]])
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META13]], !DIExpression(), [[META17]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2:[0-9]+]], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META14:![0-9]+]], !DIExpression(), [[META23:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META23]], !tbaa [[TBAA24:![0-9]+]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG22]]
@@ -27,7 +27,7 @@ define dso_local void @foo(ptr %A) #0 !dbg !7 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG30:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG31:![0-9]+]], !prof [[PROF32:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG34:![0-9]+]], !tbaa [[TBAA18]]
@@ -50,7 +50,7 @@ entry:
#dbg_assign(i1 undef, !13, !DIExpression(), !16, ptr %A.addr, !DIExpression(), !17)
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !13, !DIExpression(), !17)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !22
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !22
#dbg_declare(ptr %i, !14, !DIExpression(), !23)
store i32 0, ptr %i, align 4, !dbg !23, !tbaa !24
br label %for.cond, !dbg !22
@@ -63,7 +63,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !31, !prof !32
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !33
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !33
br label %for.end
for.body: ; preds = %for.cond
@@ -85,10 +85,10 @@ for.end: ; preds = %for.cond.cleanup
}
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture) #1
; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture) #1
; Function Attrs: nounwind uwtable
define dso_local void @bar(ptr %A) #0 !dbg !41 {
@@ -98,7 +98,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8, !tbaa [[TBAA18]]
; CHECK-NEXT: #dbg_declare(ptr [[A_ADDR]], [[META43:![0-9]+]], !DIExpression(), [[META46:![0-9]+]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG47:![0-9]+]]
; CHECK-NEXT: #dbg_declare(ptr [[I]], [[META44:![0-9]+]], !DIExpression(), [[META48:![0-9]+]])
; CHECK-NEXT: store i32 0, ptr [[I]], align 4, !dbg [[META48]], !tbaa [[TBAA24]]
; CHECK-NEXT: br label [[FOR_COND:%.*]], !dbg [[DBG47]]
@@ -109,7 +109,7 @@ define dso_local void @bar(ptr %A) #0 !dbg !41 {
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], [[TMP2]], !dbg [[DBG53:![0-9]+]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]], !dbg [[DBG54:![0-9]+]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[I]]) #[[ATTR2]], !dbg [[DBG55:![0-9]+]]
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !dbg [[DBG56:![0-9]+]], !tbaa [[TBAA18]]
@@ -131,7 +131,7 @@ entry:
%i = alloca i32, align 4
store ptr %A, ptr %A.addr, align 8, !tbaa !18
#dbg_declare(ptr %A.addr, !43, !DIExpression(), !46)
- call void @llvm.lifetime.start.p0(i64 4, ptr %i) #2, !dbg !47
+ call void @llvm.lifetime.start.p0(ptr %i) #2, !dbg !47
#dbg_declare(ptr %i, !44, !DIExpression(), !48)
store i32 0, ptr %i, align 4, !dbg !48, !tbaa !24
br label %for.cond, !dbg !47
@@ -144,7 +144,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.cond.cleanup, !dbg !54
for.cond.cleanup: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(i64 4, ptr %i) #2, !dbg !55
+ call void @llvm.lifetime.end.p0(ptr %i) #2, !dbg !55
br label %for.end
for.body: ; preds = %for.cond
diff --git a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
index d9ed9df..5db1989 100644
--- a/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
+++ b/llvm/test/tools/llvm-reduce/operands-to-args-lifetimes.ll
@@ -4,15 +4,15 @@
; INTERESTING: store
; REDUCED: define void @test(ptr %a) {
; REDUCED-NEXT: %a1 = alloca i32
-; REDUCED-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.start.p0(ptr %a1)
; REDUCED-NEXT: store i32 0, ptr %a
; REDUCED-NEXT: store i32 1, ptr %a
-; REDUCED-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr %a1)
+; REDUCED-NEXT: call void @llvm.lifetime.end.p0(ptr %a1)
define void @test() {
%a = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.start.p0(ptr %a)
store i32 0, ptr %a
store i32 1, ptr %a
- call void @llvm.lifetime.end.p0(i64 4, ptr %a)
+ call void @llvm.lifetime.end.p0(ptr %a)
ret void
}
diff --git a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
index b68f718..75b152f 100644
--- a/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
+++ b/llvm/test/tools/llvm-reduce/reduce-operands-alloca.ll
@@ -69,13 +69,13 @@ define void @alloca_constexpr_elt() {
}
; CHECK-LABEL: @alloca_lifetimes(
-; ZERO: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; ONE: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
-; POISON: call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+; ZERO: call void @llvm.lifetime.start.p0(ptr %alloca)
+; ONE: call void @llvm.lifetime.start.p0(ptr %alloca)
+; POISON: call void @llvm.lifetime.start.p0(ptr %alloca)
define void @alloca_lifetimes() {
%alloca = alloca i32
- call void @llvm.lifetime.start.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.start.p0(ptr %alloca)
store i32 0, ptr %alloca
- call void @llvm.lifetime.end.p0(i64 4, ptr %alloca)
+ call void @llvm.lifetime.end.p0(ptr %alloca)
ret void
}
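One practical note on the frozen tests throughout this diff: IR written in the old two-operand form is not expected to be rejected outright but to be upgraded in place by dropping the size operand, assuming this migration follows the usual AutoUpgrade route for intrinsic signature changes; that would be why every test above needed only a textual rewrite and no structural changes. A sketch of the upgrade, under that assumption:

; as serialized by an older compiler
  call void @llvm.lifetime.end.p0(i64 4, ptr %a)
; equivalent form after the upgrade
  call void @llvm.lifetime.end.p0(ptr %a)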