Diffstat (limited to 'llvm'): 180 files changed, 5247 insertions, 2203 deletions
diff --git a/llvm/docs/CommandGuide/index.rst b/llvm/docs/CommandGuide/index.rst index f85f32a..8f080de 100644 --- a/llvm/docs/CommandGuide/index.rst +++ b/llvm/docs/CommandGuide/index.rst @@ -92,6 +92,7 @@ Developer Tools llvm-pdbutil llvm-profgen llvm-tli-checker + llvm-offload-binary Remarks Tools ~~~~~~~~~~~~~~ diff --git a/llvm/docs/CommandGuide/llvm-offload-binary.rst b/llvm/docs/CommandGuide/llvm-offload-binary.rst new file mode 100644 index 0000000..960b12d --- /dev/null +++ b/llvm/docs/CommandGuide/llvm-offload-binary.rst @@ -0,0 +1,185 @@ +llvm-offload-binary - LLVM Offload Binary Packager +================================================== + +.. program:: llvm-offload-binary + +SYNOPSIS +-------- + +:program:`llvm-offload-binary` [*options*] [*input files...*] + +DESCRIPTION +----------- + +:program:`llvm-offload-binary` is a utility for bundling multiple device object +files into a single binary container. The resulting binary can then be embedded +into the host section table to form a fat binary containing offloading code for +different targets. Conversely, it can also extract previously bundled device +images. + +The binary format begins with the magic bytes ``0x10FF10AD``, followed by a +version and size. Each binary contains its own header, allowing tools to locate +offloading sections even when merged by a linker. Each offload entry includes +metadata such as the device image kind, producer kind, and key-value string +metadata. Multiple offloading images are concatenated to form a fat binary. + +EXAMPLE +------- + +.. code-block:: console + + # Package multiple device images into a fat binary: + $ llvm-offload-binary -o out.bin \ + --image=file=input.o,triple=nvptx64,arch=sm_70 + + # Extract a matching image from a fat binary: + $ llvm-offload-binary in.bin \ + --image=file=output.o,triple=nvptx64,arch=sm_70 + + # Extract and archive images into a static library: + $ llvm-offload-binary in.bin --archive -o libdevice.a + +OPTIONS +------- + +.. option:: --archive + + When extracting from an input binary, write all extracted images into a static + archive instead of separate files. + +.. option:: --image=<<key>=<value>,...> + + Specify a set of arbitrary key-value arguments describing an image. + Commonly used optional keys include ``arch`` (e.g. ``sm_70`` for CUDA) and + ``triple`` (e.g. nvptx64-nvidia-cuda). + +.. option:: -o <file> + + Write output to <file>. When bundling, this specifies the fat binary filename. + When extracting, this specifies the archive or output file destination. + +.. option:: --help, -h + + Display available options. Use ``--help-hidden`` to show hidden options. + +.. option:: --help-list + + Display a list of all options. Use ``--help-list-hidden`` to show hidden ones. + +.. option:: --version + + Display the version of the :program:`llvm-offload-binary` executable. + +.. option:: @<FILE> + + Read command-line options from response file `<FILE>`. + +BINARY FORMAT +------------- + +The binary format is marked by the magic bytes ``0x10FF10AD``, followed by a +version number. Each created binary contains its own header. This allows tools +to locate offloading sections even after linker operations such as relocatable +linking. Conceptually, this binary format is a serialization of a string map and +an image buffer. + +.. 
table:: Offloading Binary Header + :name: table-binary_header + + +----------+--------------+----------------------------------------------------+ + | Type | Identifier | Description | + +==========+==============+====================================================+ + | uint8_t | magic | The magic bytes for the binary format (0x10FF10AD) | + +----------+--------------+----------------------------------------------------+ + | uint32_t | version | Version of this format (currently version 1) | + +----------+--------------+----------------------------------------------------+ + | uint64_t | size | Size of this binary in bytes | + +----------+--------------+----------------------------------------------------+ + | uint64_t | entry offset | Absolute offset of the offload entries in bytes | + +----------+--------------+----------------------------------------------------+ + | uint64_t | entry size | Size of the offload entries in bytes | + +----------+--------------+----------------------------------------------------+ + +Each offload entry describes a bundled image along with its associated metadata. + +.. table:: Offloading Entry Table + :name: table-binary_entry + + +----------+---------------+----------------------------------------------------+ + | Type | Identifier | Description | + +==========+===============+====================================================+ + | uint16_t | image kind | The kind of the device image (e.g. bc, cubin) | + +----------+---------------+----------------------------------------------------+ + | uint16_t | offload kind | The producer of the image (e.g. openmp, cuda) | + +----------+---------------+----------------------------------------------------+ + | uint32_t | flags | Generic flags for the image | + +----------+---------------+----------------------------------------------------+ + | uint64_t | string offset | Absolute offset of the string metadata table | + +----------+---------------+----------------------------------------------------+ + | uint64_t | num strings | Number of string entries in the table | + +----------+---------------+----------------------------------------------------+ + | uint64_t | image offset | Absolute offset of the device image in bytes | + +----------+---------------+----------------------------------------------------+ + | uint64_t | image size | Size of the device image in bytes | + +----------+---------------+----------------------------------------------------+ + +The entry table refers to both a string table and the raw device image itself. +The string table provides arbitrary key-value metadata. + +.. table:: Offloading String Entry + :name: table-binary_string + + +----------+--------------+-------------------------------------------------------+ + | Type | Identifier | Description | + +==========+==============+=======================================================+ + | uint64_t | key offset | Absolute byte offset of the key in the string table | + +----------+--------------+-------------------------------------------------------+ + | uint64_t | value offset | Absolute byte offset of the value in the string table | + +----------+--------------+-------------------------------------------------------+ + +The string table is a collection of null-terminated strings stored in the image. +Offsets allow string entries to be interpreted as key-value pairs, enabling +flexible metadata such as architecture or target triple. + +The enumerated values for ``image kind`` and ``offload kind`` are: + +.. 
table:: Image Kind + :name: table-image_kind + + +---------------+-------+---------------------------------------+ + | Name | Value | Description | + +===============+=======+=======================================+ + | IMG_None | 0x00 | No image information provided | + +---------------+-------+---------------------------------------+ + | IMG_Object | 0x01 | The image is a generic object file | + +---------------+-------+---------------------------------------+ + | IMG_Bitcode | 0x02 | The image is an LLVM-IR bitcode file | + +---------------+-------+---------------------------------------+ + | IMG_Cubin | 0x03 | The image is a CUDA object file | + +---------------+-------+---------------------------------------+ + | IMG_Fatbinary | 0x04 | The image is a CUDA fatbinary file | + +---------------+-------+---------------------------------------+ + | IMG_PTX | 0x05 | The image is a CUDA PTX file | + +---------------+-------+---------------------------------------+ + +.. table:: Offload Kind + :name: table-offload_kind + + +------------+-------+---------------------------------------+ + | Name | Value | Description | + +============+=======+=======================================+ + | OFK_None | 0x00 | No offloading information provided | + +------------+-------+---------------------------------------+ + | OFK_OpenMP | 0x01 | The producer was OpenMP offloading | + +------------+-------+---------------------------------------+ + | OFK_CUDA | 0x02 | The producer was CUDA | + +------------+-------+---------------------------------------+ + | OFK_HIP | 0x03 | The producer was HIP | + +------------+-------+---------------------------------------+ + | OFK_SYCL | 0x04 | The producer was SYCL | + +------------+-------+---------------------------------------+ + +SEE ALSO +-------- + +:manpage:`clang(1)`, :manpage:`llvm-objdump(1)` diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index 20bd811..6d0e828 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -2529,6 +2529,9 @@ For example: if the attributed function is called during invocation of a function attributed with ``sanitize_realtime``. This attribute is incompatible with the ``sanitize_realtime`` attribute. +``sanitize_alloc_token`` + This attribute indicates that implicit allocation token instrumentation + is enabled for this function. ``speculative_load_hardening`` This attribute indicates that `Speculative Load Hardening <https://llvm.org/docs/SpeculativeLoadHardening.html>`_ @@ -8577,6 +8580,21 @@ Example: The ``nofree`` metadata indicates the memory pointed by the pointer will not be freed after the attached instruction. +'``alloc_token``' Metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``alloc_token`` metadata may be attached to calls to memory allocation +functions, and contains richer semantic information about the type of the +allocation. This information is consumed by the ``alloc-token`` pass to +instrument such calls with allocation token IDs. + +The metadata contains a string with the type of an allocation. + +.. 
code-block:: none + + call ptr @malloc(i64 64), !alloc_token !0 + + !0 = !{!"<type-name>"} Module Flags Metadata ===================== diff --git a/llvm/docs/OptBisect.rst b/llvm/docs/OptBisect.rst index 0e4d31a..e3ba078 100644 --- a/llvm/docs/OptBisect.rst +++ b/llvm/docs/OptBisect.rst @@ -8,7 +8,7 @@ Using -opt-bisect-limit to debug optimization errors Introduction ============ -The -opt-bisect-limit option provides a way to disable all optimization passes +The ``-opt-bisect-limit`` option provides a way to disable all optimization passes above a specified limit without modifying the way in which the Pass Managers are populated. The intention of this option is to assist in tracking down problems where incorrect transformations during optimization result in incorrect @@ -19,10 +19,10 @@ skipped while still allowing correct code generation call a function to check the opt-bisect limit before performing optimizations. Passes which either must be run or do not modify the IR do not perform this check and are therefore never skipped. Generally, this means analysis passes, passes -that are run at CodeGenOptLevel::None and passes which are required for register +that are run at ``CodeGenOptLevel::None`` and passes which are required for register allocation. -The -opt-bisect-limit option can be used with any tool, including front ends +The ``-opt-bisect-limit`` option can be used with any tool, including front ends such as clang, that uses the core LLVM library for optimization and code generation. The exact syntax for invoking the option is discussed below. @@ -36,7 +36,7 @@ transformations that is difficult to replicate with tools like opt and llc. Getting Started =============== -The -opt-bisect-limit command line option can be passed directly to tools such +The ``-opt-bisect-limit`` command-line option can be passed directly to tools such as opt, llc and lli. The syntax is as follows: :: @@ -49,17 +49,17 @@ indicating the index value that is associated with that optimization. To skip optimizations, pass the value of the last optimization to be performed as the opt-bisect-limit. All optimizations with a higher index value will be skipped. -In order to use the -opt-bisect-limit option with a driver that provides a +In order to use the ``-opt-bisect-limit`` option with a driver that provides a wrapper around the LLVM core library, an additional prefix option may be required, as defined by the driver. For example, to use this option with -clang, the "-mllvm" prefix must be used. A typical clang invocation would look +clang, the ``-mllvm`` prefix must be used. A typical clang invocation would look like this: :: clang -O2 -mllvm -opt-bisect-limit=256 my_file.c -The -opt-bisect-limit option may also be applied to link-time optimizations by +The ``-opt-bisect-limit`` option may also be applied to link-time optimizations by using a prefix to indicate that this is a plug-in option for the linker. The following syntax will set a bisect limit for LTO transformations: @@ -72,11 +72,11 @@ following syntax will set a bisect limit for LTO transformations: LTO passes are run by a library instance invoked by the linker. Therefore any passes run in the primary driver compilation phase are not affected by options -passed via '-Wl,-plugin-opt' and LTO passes are not affected by options -passed to the driver-invoked LLVM invocation via '-mllvm'. +passed via ``-Wl,-plugin-opt`` and LTO passes are not affected by options +passed to the driver-invoked LLVM invocation via ``-mllvm``. 
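For illustration, a single invocation can therefore carry both prefixes, each limiting its own phase independently (a sketch; the exact plugin-option spelling depends on the linker in use)::

    clang -flto -O2 -mllvm -opt-bisect-limit=256 \
          -Wl,-plugin-opt,-opt-bisect-limit=16 my_file.c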
Passing ``-opt-bisect-print-ir-path=path/foo.ll`` will dump the IR to -``path/foo.ll`` when -opt-bisect-limit starts skipping passes. +``path/foo.ll`` when ``-opt-bisect-limit`` starts skipping passes. Bisection Index Values ====================== @@ -85,7 +85,7 @@ The granularity of the optimizations associated with a single index value is variable. Depending on how the optimization pass has been instrumented the value may be associated with as much as all transformations that would have been performed by an optimization pass on an IR unit for which it is invoked -(for instance, during a single call of runOnFunction for a FunctionPass) or as +(for instance, during a single call of ``runOnFunction`` for a ``FunctionPass``) or as little as a single transformation. The index values may also be nested so that if an invocation of the pass is not skipped individual transformations within that invocation may still be skipped. @@ -99,7 +99,7 @@ is not a problem. When an opt-bisect index value refers to an entire invocation of the run function for a pass, the pass will query whether or not it should be skipped each time it is invoked and each invocation will be assigned a unique value. -For example, if a FunctionPass is used with a module containing three functions +For example, if a ``FunctionPass`` is used with a module containing three functions a different index value will be assigned to the pass for each of the functions as the pass is run. The pass may be run on two functions but skipped for the third. @@ -144,13 +144,13 @@ Example Usage Pass Skipping Implementation ============================ -The -opt-bisect-limit implementation depends on individual passes opting in to -the opt-bisect process. The OptBisect object that manages the process is +The ``-opt-bisect-limit`` implementation depends on individual passes opting in to +the opt-bisect process. The ``OptBisect`` object that manages the process is entirely passive and has no knowledge of how any pass is implemented. When a -pass is run if the pass may be skipped, it should call the OptBisect object to +pass is run if the pass may be skipped, it should call the ``OptBisect`` object to see if it should be skipped. -The OptBisect object is intended to be accessed through LLVMContext and each +The ``OptBisect`` object is intended to be accessed through ``LLVMContext`` and each Pass base class contains a helper function that abstracts the details in order to make this check uniform across all passes. These helper functions are: @@ -160,7 +160,7 @@ to make this check uniform across all passes. These helper functions are: bool FunctionPass::skipFunction(const Function &F); bool LoopPass::skipLoop(const Loop *L); -A MachineFunctionPass should use FunctionPass::skipFunction() as such: +A ``MachineFunctionPass`` should use ``FunctionPass::skipFunction()`` as such: .. code-block:: c++ @@ -170,11 +170,11 @@ A MachineFunctionPass should use FunctionPass::skipFunction() as such: // Otherwise, run the pass normally. } -In addition to checking with the OptBisect class to see if the pass should be -skipped, the skipFunction(), skipLoop() and skipBasicBlock() helper functions -also look for the presence of the "optnone" function attribute. The calling +In addition to checking with the ``OptBisect`` class to see if the pass should be +skipped, the ``skipFunction()``, ``skipLoop()`` and ``skipBasicBlock()`` helper functions +also look for the presence of the ``optnone`` function attribute. 
The calling pass will be unable to determine whether it is being skipped because the -"optnone" attribute is present or because the opt-bisect-limit has been +``optnone`` attribute is present or because the ``opt-bisect-limit`` has been reached. This is desirable because the behavior should be the same in either case. diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md index 85c16b9c..79d93d0 100644 --- a/llvm/docs/ReleaseNotes.md +++ b/llvm/docs/ReleaseNotes.md @@ -146,6 +146,8 @@ Changes to the Python bindings Changes to the C API -------------------- +* Add `LLVMGetOrInsertFunction` to get or insert a function, replacing the combination of `LLVMGetNamedFunction` and `LLVMAddFunction`. + Changes to the CodeGen infrastructure ------------------------------------- @@ -177,6 +179,10 @@ Changes to Sanitizers Other Changes ------------- +* Introduces the `AllocToken` pass, an instrumentation pass providing tokens to + memory allocators enabling various heap organization strategies, such as heap + partitioning. + External Open Source Projects Using LLVM {{env.config.release}} =============================================================== diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h index d02cf98..3d22f859 100644 --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -1207,6 +1207,22 @@ LLVM_C_ABI LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy); /** + * Obtain or insert a function into a module. + * + * If a function with the specified name already exists in the module, it + * is returned. Otherwise, a new function is created in the module with the + * specified name and type and is returned. + * + * The returned value corresponds to a llvm::Function instance. + * + * @see llvm::Module::getOrInsertFunction() + */ +LLVM_C_ABI LLVMValueRef LLVMGetOrInsertFunction(LLVMModuleRef M, + const char *Name, + size_t NameLen, + LLVMTypeRef FunctionTy); + +/** * Obtain a Function value from a Module by its name. * * The returned value corresponds to a llvm::Function value. diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h index 1c7d346..464f475 100644 --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -800,6 +800,7 @@ enum AttributeKindCodes { ATTR_KIND_SANITIZE_TYPE = 101, ATTR_KIND_CAPTURES = 102, ATTR_KIND_DEAD_ON_RETURN = 103, + ATTR_KIND_SANITIZE_ALLOC_TOKEN = 104, }; enum ComdatSelectionKindCodes { diff --git a/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/llvm/include/llvm/CodeGen/LiveRangeEdit.h index 6473138a..d0ed3ff 100644 --- a/llvm/include/llvm/CodeGen/LiveRangeEdit.h +++ b/llvm/include/llvm/CodeGen/LiveRangeEdit.h @@ -75,24 +75,14 @@ private: /// FirstNew - Index of the first register added to NewRegs. const unsigned FirstNew; - /// ScannedRemattable - true when remattable values have been identified. - bool ScannedRemattable = false; - /// DeadRemats - The saved instructions which have already been dead after /// rematerialization but not deleted yet -- to be done in postOptimization. SmallPtrSet<MachineInstr *, 32> *DeadRemats; - /// Remattable - Values defined by remattable instructions as identified by - /// tii.isTriviallyReMaterializable(). - SmallPtrSet<const VNInfo *, 4> Remattable; - /// Rematted - Values that were actually rematted, and so need to have their /// live range trimmed or entirely removed. 
SmallPtrSet<const VNInfo *, 4> Rematted; - /// scanRemattable - Identify the Parent values that may rematerialize. - void scanRemattable(); - /// foldAsLoad - If LI has a single use and a single def that can be folded as /// a load, eliminate the register by folding the def into the use. bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead); @@ -175,11 +165,6 @@ public: Register create() { return createFrom(getReg()); } - /// anyRematerializable - Return true if any parent values may be - /// rematerializable. This function must be called before - /// canRematerializeAt is called.. - bool anyRematerializable(); - /// Remat - Information needed to rematerialize at a specific location. struct Remat { const VNInfo *const ParentVNI; // parent_'s value at the remat location. @@ -189,9 +174,9 @@ public: explicit Remat(const VNInfo *ParentVNI) : ParentVNI(ParentVNI) {} }; - /// canRematerializeAt - Determine if ParentVNI can be rematerialized at + /// canRematerializeAt - Determine if RM.Orig can be rematerialized at /// UseIdx. It is assumed that parent_.getVNINfoAt(UseIdx) == ParentVNI. - bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx); + bool canRematerializeAt(Remat &RM, SlotIndex UseIdx); /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an /// instruction into MBB before MI. The new instruction is mapped, but diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td index ef816fb..8e7d9dc 100644 --- a/llvm/include/llvm/IR/Attributes.td +++ b/llvm/include/llvm/IR/Attributes.td @@ -342,6 +342,9 @@ def SanitizeRealtime : EnumAttr<"sanitize_realtime", IntersectPreserve, [FnAttr] /// during a real-time sanitized function (see `sanitize_realtime`). def SanitizeRealtimeBlocking : EnumAttr<"sanitize_realtime_blocking", IntersectPreserve, [FnAttr]>; +/// Allocation token instrumentation is on. +def SanitizeAllocToken : EnumAttr<"sanitize_alloc_token", IntersectPreserve, [FnAttr]>; + /// Speculative Load Hardening is enabled. 
/// /// Note that this uses the default compatibility (always compatible during diff --git a/llvm/include/llvm/IR/FixedMetadataKinds.def b/llvm/include/llvm/IR/FixedMetadataKinds.def index 0603abc..74746cc 100644 --- a/llvm/include/llvm/IR/FixedMetadataKinds.def +++ b/llvm/include/llvm/IR/FixedMetadataKinds.def @@ -56,3 +56,4 @@ LLVM_FIXED_MD_KIND(MD_noalias_addrspace, "noalias.addrspace", 41) LLVM_FIXED_MD_KIND(MD_callee_type, "callee_type", 42) LLVM_FIXED_MD_KIND(MD_nofree, "nofree", 43) LLVM_FIXED_MD_KIND(MD_captures, "captures", 44) +LLVM_FIXED_MD_KIND(MD_alloc_token, "alloc_token", 45) diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h index 14685ab..9f56779 100644 --- a/llvm/include/llvm/IR/InstrTypes.h +++ b/llvm/include/llvm/IR/InstrTypes.h @@ -601,11 +601,9 @@ public: Instruction::CastOps firstOpcode, ///< Opcode of first cast Instruction::CastOps secondOpcode, ///< Opcode of second cast Type *SrcTy, ///< SrcTy of 1st cast - Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast - Type *DstTy, ///< DstTy of 2nd cast - Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null - Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null - Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null + Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast + Type *DstTy, ///< DstTy of 2nd cast + const DataLayout *DL ///< Optional data layout ); /// Return the opcode of this CastInst diff --git a/llvm/include/llvm/Support/CrashRecoveryContext.h b/llvm/include/llvm/Support/CrashRecoveryContext.h index 773de89..ffee81d 100644 --- a/llvm/include/llvm/Support/CrashRecoveryContext.h +++ b/llvm/include/llvm/Support/CrashRecoveryContext.h @@ -80,9 +80,6 @@ public: /// make as little assumptions as possible about the program state when /// RunSafely has returned false. LLVM_ABI bool RunSafely(function_ref<void()> Fn); - bool RunSafely(void (*Fn)(void*), void *UserData) { - return RunSafely([&]() { Fn(UserData); }); - } /// Execute the provide callback function (with the given arguments) in /// a protected context which is run in another thread (optionally with a @@ -94,10 +91,6 @@ public: /// propagated to the new thread as well. 
LLVM_ABI bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0); - bool RunSafelyOnThread(void (*Fn)(void*), void *UserData, - unsigned RequestedStackSize = 0) { - return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize); - } LLVM_ABI bool RunSafelyOnNewStack(function_ref<void()>, unsigned RequestedStackSize = 0); diff --git a/llvm/include/llvm/Support/SpecialCaseList.h b/llvm/include/llvm/Support/SpecialCaseList.h index 22a62ea..c2c9271 100644 --- a/llvm/include/llvm/Support/SpecialCaseList.h +++ b/llvm/include/llvm/Support/SpecialCaseList.h @@ -147,7 +147,9 @@ protected: Section(StringRef Str, unsigned FileIdx) : SectionStr(Str), FileIdx(FileIdx) {}; - std::unique_ptr<Matcher> SectionMatcher = std::make_unique<Matcher>(); + Section(Section &&) = default; + + Matcher SectionMatcher; SectionEntries Entries; std::string SectionStr; unsigned FileIdx; diff --git a/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h b/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h new file mode 100644 index 0000000..b1391cb0 --- /dev/null +++ b/llvm/include/llvm/Transforms/Instrumentation/AllocToken.h @@ -0,0 +1,46 @@ +//===- AllocToken.h - Allocation token instrumentation --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the AllocTokenPass, an instrumentation pass that +// replaces allocation calls with ones including an allocation token. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ALLOCTOKEN_H +#define LLVM_TRANSFORMS_INSTRUMENTATION_ALLOCTOKEN_H + +#include "llvm/IR/Analysis.h" +#include "llvm/IR/PassManager.h" +#include <optional> + +namespace llvm { + +class Module; + +struct AllocTokenOptions { + std::optional<uint64_t> MaxTokens; + bool FastABI = false; + bool Extended = false; + AllocTokenOptions() = default; +}; + +/// A module pass that rewrites heap allocations to use token-enabled +/// allocation functions based on various source-level properties. 
+class AllocTokenPass : public PassInfoMixin<AllocTokenPass> { +public: + LLVM_ABI explicit AllocTokenPass(AllocTokenOptions Opts = {}); + LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM); + static bool isRequired() { return true; } + +private: + const AllocTokenOptions Options; +}; + +} // namespace llvm + +#endif // LLVM_TRANSFORMS_INSTRUMENTATION_ALLOCTOKEN_H diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp index d52b073..b744537 100755 --- a/llvm/lib/Analysis/ConstantFolding.cpp +++ b/llvm/lib/Analysis/ConstantFolding.cpp @@ -1482,6 +1482,15 @@ Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL) { assert(Instruction::isCast(Opcode)); + + if (auto *CE = dyn_cast<ConstantExpr>(C)) + if (CE->isCast()) + if (unsigned NewOp = CastInst::isEliminableCastPair( + Instruction::CastOps(CE->getOpcode()), + Instruction::CastOps(Opcode), CE->getOperand(0)->getType(), + C->getType(), DestTy, &DL)) + return ConstantFoldCastOperand(NewOp, CE->getOperand(0), DestTy, DL); + switch (Opcode) { default: llvm_unreachable("Missing case"); diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp index 0d978d4..d1977f0 100644 --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -5425,15 +5425,8 @@ static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, if (Src->getType() == Ty) { auto FirstOp = CI->getOpcode(); auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); - Type *SrcIntPtrTy = - SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; - Type *MidIntPtrTy = - MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; - Type *DstIntPtrTy = - DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, - SrcIntPtrTy, MidIntPtrTy, - DstIntPtrTy) == Instruction::BitCast) + &Q.DL) == Instruction::BitCast) return Src; } } @@ -6473,7 +6466,8 @@ static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) { static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) { assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum || - IID == Intrinsic::maximum || IID == Intrinsic::minimum) && + IID == Intrinsic::maximum || IID == Intrinsic::minimum || + IID == Intrinsic::maximumnum || IID == Intrinsic::minimumnum) && "Unsupported intrinsic"); auto *M0 = dyn_cast<IntrinsicInst>(Op0); @@ -6512,6 +6506,82 @@ static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, return nullptr; } +enum class MinMaxOptResult { + CannotOptimize = 0, + UseNewConstVal = 1, + UseOtherVal = 2, + // For undef/poison, we can choose to either propgate undef/poison or + // use the LHS value depending on what will allow more optimization. + UseEither = 3 +}; +// Get the optimized value for a min/max instruction with a single constant +// input (either undef or scalar constantFP). The result may indicate to +// use the non-const LHS value, use a new constant value instead (with NaNs +// quieted), or to choose either option in the case of undef/poison. 
+static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst, + const Intrinsic::ID IID, + const CallBase *Call, + Constant **OutNewConstVal) { + assert(OutNewConstVal != nullptr); + + bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; + bool PropagateSNaN = IID == Intrinsic::minnum || IID == Intrinsic::maxnum; + bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum || + IID == Intrinsic::minimumnum; + + // min/max(x, poison) -> either x or poison + if (isa<UndefValue>(RHSConst)) { + *OutNewConstVal = const_cast<Constant *>(RHSConst); + return MinMaxOptResult::UseEither; + } + + const ConstantFP *CFP = dyn_cast<ConstantFP>(RHSConst); + if (!CFP) + return MinMaxOptResult::CannotOptimize; + APFloat CAPF = CFP->getValueAPF(); + + // minnum(x, qnan) -> x + // maxnum(x, qnan) -> x + // minnum(x, snan) -> qnan + // maxnum(x, snan) -> qnan + // minimum(X, nan) -> qnan + // maximum(X, nan) -> qnan + // minimumnum(X, nan) -> x + // maximumnum(X, nan) -> x + if (CAPF.isNaN()) { + if (PropagateNaN || (PropagateSNaN && CAPF.isSignaling())) { + *OutNewConstVal = ConstantFP::get(CFP->getType(), CAPF.makeQuiet()); + return MinMaxOptResult::UseNewConstVal; + } + return MinMaxOptResult::UseOtherVal; + } + + if (CAPF.isInfinity() || (Call && Call->hasNoInfs() && CAPF.isLargest())) { + // minnum(X, -inf) -> -inf (ignoring sNaN -> qNaN propagation) + // maxnum(X, +inf) -> +inf (ignoring sNaN -> qNaN propagation) + // minimum(X, -inf) -> -inf if nnan + // maximum(X, +inf) -> +inf if nnan + // minimumnum(X, -inf) -> -inf + // maximumnum(X, +inf) -> +inf + if (CAPF.isNegative() == IsMin && + (!PropagateNaN || (Call && Call->hasNoNaNs()))) { + *OutNewConstVal = const_cast<Constant *>(RHSConst); + return MinMaxOptResult::UseNewConstVal; + } + + // minnum(X, +inf) -> X if nnan + // maxnum(X, -inf) -> X if nnan + // minimum(X, +inf) -> X (ignoring quieting of sNaNs) + // maximum(X, -inf) -> X (ignoring quieting of sNaNs) + // minimumnum(X, +inf) -> X if nnan + // maximumnum(X, -inf) -> X if nnan + if (CAPF.isNegative() != IsMin && + (PropagateNaN || (Call && Call->hasNoNaNs()))) + return MinMaxOptResult::UseOtherVal; + } + return MinMaxOptResult::CannotOptimize; +} + Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, @@ -6780,8 +6850,17 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, case Intrinsic::maxnum: case Intrinsic::minnum: case Intrinsic::maximum: - case Intrinsic::minimum: { - // If the arguments are the same, this is a no-op. + case Intrinsic::minimum: + case Intrinsic::maximumnum: + case Intrinsic::minimumnum: { + // In several cases here, we deviate from exact IEEE 754 semantics + // to enable optimizations (as allowed by the LLVM IR spec). + // + // For instance, we may return one of the arguments unmodified instead of + // inserting an llvm.canonicalize to transform input sNaNs into qNaNs, + // or may assume all NaN inputs are qNaNs. + + // If the arguments are the same, this is a no-op (ignoring NaN quieting) if (Op0 == Op1) return Op0; @@ -6789,40 +6868,55 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, if (isa<Constant>(Op0)) std::swap(Op0, Op1); - // If an argument is undef, return the other argument. 
- if (Q.isUndefValue(Op1)) - return Op0; + if (Constant *C = dyn_cast<Constant>(Op1)) { + MinMaxOptResult OptResult = MinMaxOptResult::CannotOptimize; + Constant *NewConst = nullptr; + + if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) { + ElementCount ElemCount = VTy->getElementCount(); + + if (Constant *SplatVal = C->getSplatValue()) { + // Handle splat vectors (including scalable vectors) + OptResult = OptimizeConstMinMax(SplatVal, IID, Call, &NewConst); + if (OptResult == MinMaxOptResult::UseNewConstVal) + NewConst = ConstantVector::getSplat(ElemCount, NewConst); + + } else if (ElemCount.isFixed()) { + // Storage to build up new const return value (with NaNs quieted) + SmallVector<Constant *, 16> NewC(ElemCount.getFixedValue()); + + // Check elementwise whether we can optimize to either a constant + // value or return the LHS value. We cannot mix and match LHS + + // constant elements, as this would require inserting a new + // VectorShuffle instruction, which is not allowed in simplifyBinOp. + OptResult = MinMaxOptResult::UseEither; + for (unsigned i = 0; i != ElemCount.getFixedValue(); ++i) { + auto ElemResult = OptimizeConstMinMax(C->getAggregateElement(i), + IID, Call, &NewConst); + if (ElemResult == MinMaxOptResult::CannotOptimize || + (ElemResult != OptResult && + OptResult != MinMaxOptResult::UseEither && + ElemResult != MinMaxOptResult::UseEither)) { + OptResult = MinMaxOptResult::CannotOptimize; + break; + } + NewC[i] = NewConst; + if (ElemResult != MinMaxOptResult::UseEither) + OptResult = ElemResult; + } + if (OptResult == MinMaxOptResult::UseNewConstVal) + NewConst = ConstantVector::get(NewC); + } + } else { + // Handle scalar inputs + OptResult = OptimizeConstMinMax(C, IID, Call, &NewConst); + } - bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; - bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum; - - // minnum(X, nan) -> X - // maxnum(X, nan) -> X - // minimum(X, nan) -> nan - // maximum(X, nan) -> nan - if (match(Op1, m_NaN())) - return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0; - - // In the following folds, inf can be replaced with the largest finite - // float, if the ninf flag is set. 
- const APFloat *C; - if (match(Op1, m_APFloat(C)) && - (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) { - // minnum(X, -inf) -> -inf - // maxnum(X, +inf) -> +inf - // minimum(X, -inf) -> -inf if nnan - // maximum(X, +inf) -> +inf if nnan - if (C->isNegative() == IsMin && - (!PropagateNaN || (Call && Call->hasNoNaNs()))) - return ConstantFP::get(ReturnType, *C); - - // minnum(X, +inf) -> X if nnan - // maxnum(X, -inf) -> X if nnan - // minimum(X, +inf) -> X - // maximum(X, -inf) -> X - if (C->isNegative() != IsMin && - (PropagateNaN || (Call && Call->hasNoNaNs()))) - return Op0; + if (OptResult == MinMaxOptResult::UseOtherVal || + OptResult == MinMaxOptResult::UseEither) + return Op0; // Return the other arg (ignoring NaN quieting) + else if (OptResult == MinMaxOptResult::UseNewConstVal) + return NewConst; } // Min/max of the same operation with common operand: diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp index 0c4e3a2..4c2e1fe 100644 --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -37,17 +37,13 @@ static bool isDereferenceableAndAlignedPointerViaAssumption( function_ref<bool(const RetainedKnowledge &RK)> CheckSize, const DataLayout &DL, const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT) { - // Dereferenceable information from assumptions is only valid if the value - // cannot be freed between the assumption and use. For now just use the - // information for values that cannot be freed in the function. - // TODO: More precisely check if the pointer can be freed between assumption - // and use. - if (!CtxI || Ptr->canBeFreed()) + if (!CtxI) return false; /// Look through assumes to see if both dereferencability and alignment can /// be proven by an assume if needed. RetainedKnowledge AlignRK; RetainedKnowledge DerefRK; + bool PtrCanBeFreed = Ptr->canBeFreed(); bool IsAligned = Ptr->getPointerAlignment(DL) >= Alignment; return getKnowledgeForValue( Ptr, {Attribute::Dereferenceable, Attribute::Alignment}, *AC, @@ -56,7 +52,11 @@ static bool isDereferenceableAndAlignedPointerViaAssumption( return false; if (RK.AttrKind == Attribute::Alignment) AlignRK = std::max(AlignRK, RK); - if (RK.AttrKind == Attribute::Dereferenceable) + + // Dereferenceable information from assumptions is only valid if the + // value cannot be freed between the assumption and use. 
+ if ((!PtrCanBeFreed || willNotFreeBetween(Assume, CtxI)) && + RK.AttrKind == Attribute::Dereferenceable) DerefRK = std::max(DerefRK, RK); IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value(); if (IsAligned && DerefRK && CheckSize(DerefRK)) @@ -390,7 +390,11 @@ bool llvm::isDereferenceableAndAlignedInLoop( } else return false; - Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt(); + Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); + if (BasicBlock *LoopPred = L->getLoopPredecessor()) { + if (isa<BranchInst>(LoopPred->getTerminator())) + CtxI = LoopPred->getTerminator(); + } return isDereferenceableAndAlignedPointerViaAssumption( Base, Alignment, [&SE, AccessSizeSCEV, &LoopGuards](const RetainedKnowledge &RK) { @@ -399,9 +403,9 @@ bool llvm::isDereferenceableAndAlignedInLoop( SE.applyLoopGuards(AccessSizeSCEV, *LoopGuards), SE.applyLoopGuards(SE.getSCEV(RK.IRArgValue), *LoopGuards)); }, - DL, HeaderFirstNonPHI, AC, &DT) || + DL, CtxI, AC, &DT) || isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL, - HeaderFirstNonPHI, AC, &DT); + CtxI, AC, &DT); } static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) { diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index a42c061..9655c88 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -9095,6 +9095,10 @@ Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) { case Intrinsic::minimum: return Intrinsic::maximum; case Intrinsic::maxnum: return Intrinsic::minnum; case Intrinsic::minnum: return Intrinsic::maxnum; + case Intrinsic::maximumnum: + return Intrinsic::minimumnum; + case Intrinsic::minimumnum: + return Intrinsic::maximumnum; default: llvm_unreachable("Unexpected intrinsic"); } } diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 832aa9f..aaee1f0 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -2203,6 +2203,8 @@ static Attribute::AttrKind getAttrFromCode(uint64_t Code) { return Attribute::SanitizeRealtime; case bitc::ATTR_KIND_SANITIZE_REALTIME_BLOCKING: return Attribute::SanitizeRealtimeBlocking; + case bitc::ATTR_KIND_SANITIZE_ALLOC_TOKEN: + return Attribute::SanitizeAllocToken; case bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING: return Attribute::SpeculativeLoadHardening; case bitc::ATTR_KIND_SWIFT_ERROR: diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index c4070e1..6d86809 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -883,6 +883,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) { return bitc::ATTR_KIND_STRUCT_RET; case Attribute::SanitizeAddress: return bitc::ATTR_KIND_SANITIZE_ADDRESS; + case Attribute::SanitizeAllocToken: + return bitc::ATTR_KIND_SANITIZE_ALLOC_TOKEN; case Attribute::SanitizeHWAddress: return bitc::ATTR_KIND_SANITIZE_HWADDRESS; case Attribute::SanitizeThread: diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index 0c2b74c..d6e8505 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -671,10 +671,22 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { LiveInterval &OrigLI = LIS.getInterval(Original); VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); - LiveRangeEdit::Remat RM(ParentVNI); - RM.OrigMI = 
LIS.getInstructionFromIndex(OrigVNI->def); + assert(OrigVNI && "corrupted sub-interval"); + MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def); + // This can happen if for two reasons: 1) This could be a phi valno, + // or 2) the remat def has already been removed from the original + // live interval; this happens if we rematted to all uses, and + // then further split one of those live ranges. + if (!DefMI) { + markValueUsed(&VirtReg, ParentVNI); + LLVM_DEBUG(dbgs() << "\tcannot remat missing def for " << UseIdx << '\t' + << MI); + return false; + } - if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx)) { + LiveRangeEdit::Remat RM(ParentVNI); + RM.OrigMI = DefMI; + if (!Edit->canRematerializeAt(RM, UseIdx)) { markValueUsed(&VirtReg, ParentVNI); LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI); return false; @@ -739,9 +751,6 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { /// reMaterializeAll - Try to rematerialize as many uses as possible, /// and trim the live ranges after. void InlineSpiller::reMaterializeAll() { - if (!Edit->anyRematerializable()) - return; - UsedValues.clear(); // Try to remat before all uses of snippets. diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp index 59bc82d..5b0365d 100644 --- a/llvm/lib/CodeGen/LiveRangeEdit.cpp +++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp @@ -68,41 +68,12 @@ Register LiveRangeEdit::createFrom(Register OldReg) { return VReg; } -void LiveRangeEdit::scanRemattable() { - for (VNInfo *VNI : getParent().valnos) { - if (VNI->isUnused()) - continue; - Register Original = VRM->getOriginal(getReg()); - LiveInterval &OrigLI = LIS.getInterval(Original); - VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def); - if (!OrigVNI) - continue; - MachineInstr *DefMI = LIS.getInstructionFromIndex(OrigVNI->def); - if (!DefMI) - continue; - if (TII.isReMaterializable(*DefMI)) - Remattable.insert(OrigVNI); - } - ScannedRemattable = true; -} - -bool LiveRangeEdit::anyRematerializable() { - if (!ScannedRemattable) - scanRemattable(); - return !Remattable.empty(); -} - -bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI, - SlotIndex UseIdx) { - assert(ScannedRemattable && "Call anyRematerializable first"); +bool LiveRangeEdit::canRematerializeAt(Remat &RM, SlotIndex UseIdx) { + assert(RM.OrigMI && "No defining instruction for remattable value"); - // Use scanRemattable info. - if (!Remattable.count(OrigVNI)) + if (!TII.isReMaterializable(*RM.OrigMI)) return false; - // No defining instruction provided. - assert(RM.OrigMI && "No defining instruction for remattable value"); - // Verify that all used registers are available with the same values. if (!VirtRegAuxInfo::allUsesAvailableAt(RM.OrigMI, UseIdx, LIS, MRI, TII)) return false; @@ -303,6 +274,37 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) { } } + // If the dest of MI is an original reg and MI is reMaterializable, + // don't delete the inst. Replace the dest with a new reg, and keep + // the inst for remat of other siblings. The inst is saved in + // LiveRangeEdit::DeadRemats and will be deleted after all the + // allocations of the func are done. Note that if we keep the + // instruction with the original operands, that handles the physreg + // operand case (described just below) as well. + // However, immediately delete instructions which have unshrunk virtual + // register uses. 
That may provoke RA to split an interval at the KILL + // and later result in an invalid live segment end. + if (isOrigDef && DeadRemats && !HasLiveVRegUses && + TII.isReMaterializable(*MI)) { + LiveInterval &NewLI = createEmptyIntervalFrom(Dest, false); + VNInfo::Allocator &Alloc = LIS.getVNInfoAllocator(); + VNInfo *VNI = NewLI.getNextValue(Idx, Alloc); + NewLI.addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(), VNI)); + + if (DestSubReg) { + const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + auto *SR = + NewLI.createSubRange(Alloc, TRI->getSubRegIndexLaneMask(DestSubReg)); + SR->addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(), + SR->getNextValue(Idx, Alloc))); + } + + pop_back(); + DeadRemats->insert(MI); + const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); + MI->substituteRegister(Dest, NewLI.reg(), 0, TRI); + assert(MI->registerDefIsDead(NewLI.reg(), &TRI)); + } // Currently, we don't support DCE of physreg live ranges. If MI reads // any unreserved physregs, don't erase the instruction, but turn it into // a KILL instead. This way, the physreg live ranges don't end up @@ -310,7 +312,7 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) { // FIXME: It would be better to have something like shrinkToUses() for // physregs. That could potentially enable more DCE and it would free up // the physreg. It would not happen often, though. - if (ReadsPhysRegs) { + else if (ReadsPhysRegs) { MI->setDesc(TII.get(TargetOpcode::KILL)); // Remove all operands that aren't physregs. for (unsigned i = MI->getNumOperands(); i; --i) { @@ -322,41 +324,11 @@ void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) { MI->dropMemRefs(*MI->getMF()); LLVM_DEBUG(dbgs() << "Converted physregs to:\t" << *MI); } else { - // If the dest of MI is an original reg and MI is reMaterializable, - // don't delete the inst. Replace the dest with a new reg, and keep - // the inst for remat of other siblings. The inst is saved in - // LiveRangeEdit::DeadRemats and will be deleted after all the - // allocations of the func are done. - // However, immediately delete instructions which have unshrunk virtual - // register uses. That may provoke RA to split an interval at the KILL - // and later result in an invalid live segment end. - if (isOrigDef && DeadRemats && !HasLiveVRegUses && - TII.isReMaterializable(*MI)) { - LiveInterval &NewLI = createEmptyIntervalFrom(Dest, false); - VNInfo::Allocator &Alloc = LIS.getVNInfoAllocator(); - VNInfo *VNI = NewLI.getNextValue(Idx, Alloc); - NewLI.addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(), VNI)); - - if (DestSubReg) { - const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); - auto *SR = NewLI.createSubRange( - Alloc, TRI->getSubRegIndexLaneMask(DestSubReg)); - SR->addSegment(LiveInterval::Segment(Idx, Idx.getDeadSlot(), - SR->getNextValue(Idx, Alloc))); - } - - pop_back(); - DeadRemats->insert(MI); - const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); - MI->substituteRegister(Dest, NewLI.reg(), 0, TRI); - assert(MI->registerDefIsDead(NewLI.reg(), &TRI)); - } else { - if (TheDelegate) - TheDelegate->LRE_WillEraseInstruction(MI); - LIS.RemoveMachineInstrFromMaps(*MI); - MI->eraseFromParent(); - ++NumDCEDeleted; - } + if (TheDelegate) + TheDelegate->LRE_WillEraseInstruction(MI); + LIS.RemoveMachineInstrFromMaps(*MI); + MI->eraseFromParent(); + ++NumDCEDeleted; } // Erase any virtregs that are now empty and unused. 
There may be <undef> diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp index f118ee5..f9ecb2c 100644 --- a/llvm/lib/CodeGen/SplitKit.cpp +++ b/llvm/lib/CodeGen/SplitKit.cpp @@ -376,8 +376,6 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) { if (SpillMode) LICalc[1].reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT, &LIS.getVNInfoAllocator()); - - Edit->anyRematerializable(); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -638,7 +636,7 @@ VNInfo *SplitEditor::defFromParent(unsigned RegIdx, const VNInfo *ParentVNI, LiveRangeEdit::Remat RM(ParentVNI); RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def); if (RM.OrigMI && TII.isAsCheapAsAMove(*RM.OrigMI) && - Edit->canRematerializeAt(RM, OrigVNI, UseIdx)) { + Edit->canRematerializeAt(RM, UseIdx)) { if (!rematWillIncreaseRestriction(RM.OrigMI, MBB, UseIdx)) { SlotIndex Def = Edit->rematerializeAt(MBB, I, Reg, RM, TRI, Late); ++NumRemats; diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp index 6b202ba..3842b1a 100644 --- a/llvm/lib/IR/ConstantFold.cpp +++ b/llvm/lib/IR/ConstantFold.cpp @@ -55,15 +55,8 @@ foldConstantCastPair( Type *MidTy = Op->getType(); Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode()); Instruction::CastOps secondOp = Instruction::CastOps(opc); - - // Assume that pointers are never more than 64 bits wide, and only use this - // for the middle type. Otherwise we could end up folding away illegal - // bitcasts between address spaces with different sizes. - IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext()); - - // Let CastInst::isEliminableCastPair do the heavy lifting. return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy, - nullptr, FakeIntPtrTy, nullptr); + /*DL=*/nullptr); } static Constant *FoldBitCast(Constant *V, Type *DestTy) { diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index df0c85b..3f1cc1e 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -2403,6 +2403,14 @@ LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name, GlobalValue::ExternalLinkage, Name, unwrap(M))); } +LLVMValueRef LLVMGetOrInsertFunction(LLVMModuleRef M, const char *Name, + size_t NameLen, LLVMTypeRef FunctionTy) { + return wrap(unwrap(M) + ->getOrInsertFunction(StringRef(Name, NameLen), + unwrap<FunctionType>(FunctionTy)) + .getCallee()); +} + LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name) { return wrap(unwrap(M)->getFunction(Name)); } diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp index 941e41f..88e7c44 100644 --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -2824,10 +2824,10 @@ bool CastInst::isNoopCast(const DataLayout &DL) const { /// The function returns a resultOpcode so these two casts can be replaced with: /// * %Replacement = resultOpcode %SrcTy %x to DstTy /// If no such cast is permitted, the function returns 0. -unsigned CastInst::isEliminableCastPair( - Instruction::CastOps firstOp, Instruction::CastOps secondOp, - Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, - Type *DstIntPtrTy) { +unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp, + Instruction::CastOps secondOp, + Type *SrcTy, Type *MidTy, Type *DstTy, + const DataLayout *DL) { // Define the 144 possibilities for these two cast instructions. The values // in this matrix determine what to do in a given situation and select the // case in the switch below. 
The rows correspond to firstOp, the columns @@ -2936,24 +2936,16 @@ unsigned CastInst::isEliminableCastPair( return 0; // Cannot simplify if address spaces are different! - if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) + if (SrcTy != DstTy) return 0; - unsigned MidSize = MidTy->getScalarSizeInBits(); - // We can still fold this without knowing the actual sizes as long we - // know that the intermediate pointer is the largest possible + // Cannot simplify if the intermediate integer size is smaller than the // pointer size. - // FIXME: Is this always true? - if (MidSize == 64) - return Instruction::BitCast; - - // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size. - if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) + unsigned MidSize = MidTy->getScalarSizeInBits(); + if (!DL || MidSize < DL->getPointerTypeSizeInBits(SrcTy)) return 0; - unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); - if (MidSize >= PtrSize) - return Instruction::BitCast; - return 0; + + return Instruction::BitCast; } case 8: { // ext, trunc -> bitcast, if the SrcTy and DstTy are the same @@ -2973,14 +2965,17 @@ unsigned CastInst::isEliminableCastPair( // zext, sext -> zext, because sext can't sign extend after zext return Instruction::ZExt; case 11: { - // inttoptr, ptrtoint/ptrtoaddr -> bitcast if SrcSize<=PtrSize and - // SrcSize==DstSize - if (!MidIntPtrTy) + // inttoptr, ptrtoint/ptrtoaddr -> bitcast if SrcSize<=PtrSize/AddrSize + // and SrcSize==DstSize + if (!DL) return 0; - unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); + unsigned MidSize = secondOp == Instruction::PtrToAddr + ? DL->getAddressSizeInBits(MidTy) + : DL->getPointerTypeSizeInBits(MidTy); unsigned SrcSize = SrcTy->getScalarSizeInBits(); unsigned DstSize = DstTy->getScalarSizeInBits(); - if (SrcSize <= PtrSize && SrcSize == DstSize) + // TODO: Could also produce zext or trunc here. + if (SrcSize <= MidSize && SrcSize == DstSize) return Instruction::BitCast; return 0; } diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 6b3cd27..71a8a38 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -543,6 +543,7 @@ private: void visitAliasScopeListMetadata(const MDNode *MD); void visitAccessGroupMetadata(const MDNode *MD); void visitCapturesMetadata(Instruction &I, const MDNode *Captures); + void visitAllocTokenMetadata(Instruction &I, MDNode *MD); template <class Ty> bool isValidMetadataArray(const MDTuple &N); #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N); @@ -5395,6 +5396,12 @@ void Verifier::visitCapturesMetadata(Instruction &I, const MDNode *Captures) { } } +void Verifier::visitAllocTokenMetadata(Instruction &I, MDNode *MD) { + Check(isa<CallBase>(I), "!alloc_token should only exist on calls", &I); + Check(MD->getNumOperands() == 1, "!alloc_token must have 1 operand", MD); + Check(isa<MDString>(MD->getOperand(0)), "expected string", MD); +} + /// verifyInstruction - Verify that an instruction is well formed. 
/// void Verifier::visitInstruction(Instruction &I) { @@ -5625,6 +5632,9 @@ void Verifier::visitInstruction(Instruction &I) { if (MDNode *Captures = I.getMetadata(LLVMContext::MD_captures)) visitCapturesMetadata(I, Captures); + if (MDNode *MD = I.getMetadata(LLVMContext::MD_alloc_token)) + visitAllocTokenMetadata(I, MD); + if (MDNode *N = I.getDebugLoc().getAsMDNode()) { CheckDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N); visitMDNode(*N, AreDebugLocsAllowed::Yes); diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index c234623..20dcde8 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -240,6 +240,7 @@ #include "llvm/Transforms/IPO/WholeProgramDevirt.h" #include "llvm/Transforms/InstCombine/InstCombine.h" #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" +#include "llvm/Transforms/Instrumentation/AllocToken.h" #include "llvm/Transforms/Instrumentation/BoundsChecking.h" #include "llvm/Transforms/Instrumentation/CGProfile.h" #include "llvm/Transforms/Instrumentation/ControlHeightReduction.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 88550ea..c5c0d64 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -125,6 +125,7 @@ MODULE_PASS("openmp-opt", OpenMPOptPass()) MODULE_PASS("openmp-opt-postlink", OpenMPOptPass(ThinOrFullLTOPhase::FullLTOPostLink)) MODULE_PASS("partial-inliner", PartialInlinerPass()) +MODULE_PASS("alloc-token", AllocTokenPass()) MODULE_PASS("pgo-icall-prom", PGOIndirectCallPromotion()) MODULE_PASS("pgo-instr-gen", PGOInstrumentationGen()) MODULE_PASS("pgo-instr-use", PGOInstrumentationUse()) diff --git a/llvm/lib/Support/SpecialCaseList.cpp b/llvm/lib/Support/SpecialCaseList.cpp index 8d4e043..4b03885 100644 --- a/llvm/lib/Support/SpecialCaseList.cpp +++ b/llvm/lib/Support/SpecialCaseList.cpp @@ -135,7 +135,7 @@ SpecialCaseList::addSection(StringRef SectionStr, unsigned FileNo, Sections.emplace_back(SectionStr, FileNo); auto &Section = Sections.back(); - if (auto Err = Section.SectionMatcher->insert(SectionStr, LineNo, UseGlobs)) { + if (auto Err = Section.SectionMatcher.insert(SectionStr, LineNo, UseGlobs)) { return createStringError(errc::invalid_argument, "malformed section at line " + Twine(LineNo) + ": '" + SectionStr + @@ -218,7 +218,7 @@ std::pair<unsigned, unsigned> SpecialCaseList::inSectionBlame(StringRef Section, StringRef Prefix, StringRef Query, StringRef Category) const { for (const auto &S : reverse(Sections)) { - if (S.SectionMatcher->match(Section)) { + if (S.SectionMatcher.match(Section)) { unsigned Blame = inSectionBlame(S.Entries, Prefix, Query, Category); if (Blame) return {S.FileIdx, Blame}; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp index 9ca8194..56194fe 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -137,13 +137,10 @@ InstCombinerImpl::isEliminableCastPair(const CastInst *CI1, Instruction::CastOps secondOp = CI2->getOpcode(); Type *SrcIntPtrTy = SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr; - Type *MidIntPtrTy = - MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr; Type *DstIntPtrTy = DstTy->isPtrOrPtrVectorTy() ? 
DL.getIntPtrType(DstTy) : nullptr; unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, - DstTy, SrcIntPtrTy, MidIntPtrTy, - DstIntPtrTy); + DstTy, &DL); // We don't want to form an inttoptr or ptrtoint that converts to an integer // type that differs from the pointer size. diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index cdae9a7..3704ad7 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -2662,7 +2662,7 @@ void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, G->eraseFromParent(); NewGlobals[i] = NewGlobal; - Constant *ODRIndicator = ConstantPointerNull::get(PtrTy); + Constant *ODRIndicator = Constant::getNullValue(IntptrTy); GlobalValue *InstrumentedGlobal = NewGlobal; bool CanUsePrivateAliases = @@ -2677,8 +2677,7 @@ void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, // ODR should not happen for local linkage. if (NewGlobal->hasLocalLinkage()) { - ODRIndicator = - ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy); + ODRIndicator = ConstantInt::get(IntptrTy, -1); } else if (UseOdrIndicator) { // With local aliases, we need to provide another externally visible // symbol __odr_asan_XXX to detect ODR violation. @@ -2692,7 +2691,7 @@ void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, ODRIndicatorSym->setVisibility(NewGlobal->getVisibility()); ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass()); ODRIndicatorSym->setAlignment(Align(1)); - ODRIndicator = ODRIndicatorSym; + ODRIndicator = ConstantExpr::getPtrToInt(ODRIndicatorSym, IntptrTy); } Constant *Initializer = ConstantStruct::get( @@ -2703,8 +2702,7 @@ void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB, ConstantExpr::getPointerCast(Name, IntptrTy), ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy), ConstantInt::get(IntptrTy, MD.IsDynInit), - Constant::getNullValue(IntptrTy), - ConstantExpr::getPointerCast(ODRIndicator, IntptrTy)); + Constant::getNullValue(IntptrTy), ODRIndicator); LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); diff --git a/llvm/lib/Transforms/Instrumentation/AllocToken.cpp b/llvm/lib/Transforms/Instrumentation/AllocToken.cpp new file mode 100644 index 0000000..782d5a1 --- /dev/null +++ b/llvm/lib/Transforms/Instrumentation/AllocToken.cpp @@ -0,0 +1,494 @@ +//===- AllocToken.cpp - Allocation token instrumentation ------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements AllocToken, an instrumentation pass that +// replaces allocation calls with token-enabled versions. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Instrumentation/AllocToken.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/MemoryBuiltins.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/IR/Analysis.h" +#include "llvm/IR/Attributes.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalValue.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InstIterator.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/RandomNumberGenerator.h" +#include "llvm/Support/SipHash.h" +#include "llvm/Support/raw_ostream.h" +#include <cassert> +#include <cstddef> +#include <cstdint> +#include <limits> +#include <memory> +#include <optional> +#include <string> +#include <utility> +#include <variant> + +using namespace llvm; + +#define DEBUG_TYPE "alloc-token" + +namespace { + +//===--- Constants --------------------------------------------------------===// + +enum class TokenMode : unsigned { + /// Incrementally increasing token ID. + Increment = 0, + + /// Simple mode that returns a statically-assigned random token ID. + Random = 1, + + /// Token ID based on allocated type hash. + TypeHash = 2, +}; + +//===--- Command-line options ---------------------------------------------===// + +cl::opt<TokenMode> + ClMode("alloc-token-mode", cl::Hidden, cl::desc("Token assignment mode"), + cl::init(TokenMode::TypeHash), + cl::values(clEnumValN(TokenMode::Increment, "increment", + "Incrementally increasing token ID"), + clEnumValN(TokenMode::Random, "random", + "Statically-assigned random token ID"), + clEnumValN(TokenMode::TypeHash, "typehash", + "Token ID based on allocated type hash"))); + +cl::opt<std::string> ClFuncPrefix("alloc-token-prefix", + cl::desc("The allocation function prefix"), + cl::Hidden, cl::init("__alloc_token_")); + +cl::opt<uint64_t> ClMaxTokens("alloc-token-max", + cl::desc("Maximum number of tokens (0 = no max)"), + cl::Hidden, cl::init(0)); + +cl::opt<bool> + ClFastABI("alloc-token-fast-abi", + cl::desc("The token ID is encoded in the function name"), + cl::Hidden, cl::init(false)); + +// Instrument libcalls only by default - compatible allocators only need to take +// care of providing standard allocation functions. With extended coverage, also +// instrument non-libcall allocation function calls with !alloc_token +// metadata. +cl::opt<bool> + ClExtended("alloc-token-extended", + cl::desc("Extend coverage to custom allocation functions"), + cl::Hidden, cl::init(false)); + +// C++ defines ::operator new (and variants) as replaceable (vs. standard +// library versions), which are nobuiltin, and are therefore not covered by +// isAllocationFn(). Cover by default, as users of AllocToken are already +// required to provide token-aware allocation functions (no defaults). 
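For example, even though a replaceable operator new declared nobuiltin is skipped by isAllocationFn(), it is still rewritten when this coverage is enabled, provided the caller has the sanitize_alloc_token attribute (a sketch mirroring the extralibfuncs.ll test added below; the token value depends on the selected mode):

  declare ptr @_Znwm(i64) nobuiltin allocsize(0)

  %ptr = call ptr @_Znwm(i64 32), !alloc_token !0
  ; becomes:
  %ptr = call ptr @__alloc_token__Znwm(i64 32, i64 <token>), !alloc_token !0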
+cl::opt<bool> ClCoverReplaceableNew("alloc-token-cover-replaceable-new", + cl::desc("Cover replaceable operator new"), + cl::Hidden, cl::init(true)); + +cl::opt<uint64_t> ClFallbackToken( + "alloc-token-fallback", + cl::desc("The default fallback token where none could be determined"), + cl::Hidden, cl::init(0)); + +//===--- Statistics -------------------------------------------------------===// + +STATISTIC(NumFunctionsInstrumented, "Functions instrumented"); +STATISTIC(NumAllocationsInstrumented, "Allocations instrumented"); + +//===----------------------------------------------------------------------===// + +/// Returns the !alloc_token metadata if available. +/// +/// Expected format is: !{<type-name>} +MDNode *getAllocTokenMetadata(const CallBase &CB) { + MDNode *Ret = CB.getMetadata(LLVMContext::MD_alloc_token); + if (!Ret) + return nullptr; + assert(Ret->getNumOperands() == 1 && "bad !alloc_token"); + assert(isa<MDString>(Ret->getOperand(0))); + return Ret; +} + +class ModeBase { +public: + explicit ModeBase(const IntegerType &TokenTy, uint64_t MaxTokens) + : MaxTokens(MaxTokens ? MaxTokens : TokenTy.getBitMask()) { + assert(MaxTokens <= TokenTy.getBitMask()); + } + +protected: + uint64_t boundedToken(uint64_t Val) const { + assert(MaxTokens != 0); + return Val % MaxTokens; + } + + const uint64_t MaxTokens; +}; + +/// Implementation for TokenMode::Increment. +class IncrementMode : public ModeBase { +public: + using ModeBase::ModeBase; + + uint64_t operator()(const CallBase &CB, OptimizationRemarkEmitter &) { + return boundedToken(Counter++); + } + +private: + uint64_t Counter = 0; +}; + +/// Implementation for TokenMode::Random. +class RandomMode : public ModeBase { +public: + RandomMode(const IntegerType &TokenTy, uint64_t MaxTokens, + std::unique_ptr<RandomNumberGenerator> RNG) + : ModeBase(TokenTy, MaxTokens), RNG(std::move(RNG)) {} + uint64_t operator()(const CallBase &CB, OptimizationRemarkEmitter &) { + return boundedToken((*RNG)()); + } + +private: + std::unique_ptr<RandomNumberGenerator> RNG; +}; + +/// Implementation for TokenMode::TypeHash. The implementation ensures +/// hashes are stable across different compiler invocations. Uses SipHash as the +/// hash function. +class TypeHashMode : public ModeBase { +public: + using ModeBase::ModeBase; + + uint64_t operator()(const CallBase &CB, OptimizationRemarkEmitter &ORE) { + if (MDNode *N = getAllocTokenMetadata(CB)) { + MDString *S = cast<MDString>(N->getOperand(0)); + return boundedToken(getStableSipHash(S->getString())); + } + remarkNoMetadata(CB, ORE); + return ClFallbackToken; + } + + /// Remark that there was no precise type information. + static void remarkNoMetadata(const CallBase &CB, + OptimizationRemarkEmitter &ORE) { + ORE.emit([&] { + ore::NV FuncNV("Function", CB.getParent()->getParent()); + const Function *Callee = CB.getCalledFunction(); + ore::NV CalleeNV("Callee", Callee ? Callee->getName() : "<unknown>"); + return OptimizationRemark(DEBUG_TYPE, "NoAllocToken", &CB) + << "Call to '" << CalleeNV << "' in '" << FuncNV + << "' without source-level type token"; + }); + } +}; + +// Apply opt overrides. 
+AllocTokenOptions transformOptionsFromCl(AllocTokenOptions Opts) { + if (!Opts.MaxTokens.has_value()) + Opts.MaxTokens = ClMaxTokens; + Opts.FastABI |= ClFastABI; + Opts.Extended |= ClExtended; + return Opts; +} + +class AllocToken { +public: + explicit AllocToken(AllocTokenOptions Opts, Module &M, + ModuleAnalysisManager &MAM) + : Options(transformOptionsFromCl(std::move(Opts))), Mod(M), + FAM(MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager()), + Mode(IncrementMode(*IntPtrTy, *Options.MaxTokens)) { + switch (ClMode.getValue()) { + case TokenMode::Increment: + break; + case TokenMode::Random: + Mode.emplace<RandomMode>(*IntPtrTy, *Options.MaxTokens, + M.createRNG(DEBUG_TYPE)); + break; + case TokenMode::TypeHash: + Mode.emplace<TypeHashMode>(*IntPtrTy, *Options.MaxTokens); + break; + } + } + + bool instrumentFunction(Function &F); + +private: + /// Returns the LibFunc (or NotLibFunc) if this call should be instrumented. + std::optional<LibFunc> + shouldInstrumentCall(const CallBase &CB, const TargetLibraryInfo &TLI) const; + + /// Returns true for functions that are eligible for instrumentation. + static bool isInstrumentableLibFunc(LibFunc Func, const CallBase &CB, + const TargetLibraryInfo &TLI); + + /// Returns true for isAllocationFn() functions that we should ignore. + static bool ignoreInstrumentableLibFunc(LibFunc Func); + + /// Replace a call/invoke with a call/invoke to the allocation function + /// with token ID. + bool replaceAllocationCall(CallBase *CB, LibFunc Func, + OptimizationRemarkEmitter &ORE, + const TargetLibraryInfo &TLI); + + /// Return replacement function for a LibFunc that takes a token ID. + FunctionCallee getTokenAllocFunction(const CallBase &CB, uint64_t TokenID, + LibFunc OriginalFunc); + + /// Return the token ID from metadata in the call. + uint64_t getToken(const CallBase &CB, OptimizationRemarkEmitter &ORE) { + return std::visit([&](auto &&Mode) { return Mode(CB, ORE); }, Mode); + } + + const AllocTokenOptions Options; + Module &Mod; + IntegerType *IntPtrTy = Mod.getDataLayout().getIntPtrType(Mod.getContext()); + FunctionAnalysisManager &FAM; + // Cache for replacement functions. + DenseMap<std::pair<LibFunc, uint64_t>, FunctionCallee> TokenAllocFunctions; + // Selected mode. + std::variant<IncrementMode, RandomMode, TypeHashMode> Mode; +}; + +bool AllocToken::instrumentFunction(Function &F) { + // Do not apply any instrumentation for naked functions. + if (F.hasFnAttribute(Attribute::Naked)) + return false; + if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation)) + return false; + // Don't touch available_externally functions, their actual body is elsewhere. + if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) + return false; + // Only instrument functions that have the sanitize_alloc_token attribute. + if (!F.hasFnAttribute(Attribute::SanitizeAllocToken)) + return false; + + auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); + auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); + SmallVector<std::pair<CallBase *, LibFunc>, 4> AllocCalls; + + // Collect all allocation calls to avoid iterator invalidation. 
+ for (Instruction &I : instructions(F)) { + auto *CB = dyn_cast<CallBase>(&I); + if (!CB) + continue; + if (std::optional<LibFunc> Func = shouldInstrumentCall(*CB, TLI)) + AllocCalls.emplace_back(CB, Func.value()); + } + + bool Modified = false; + for (auto &[CB, Func] : AllocCalls) + Modified |= replaceAllocationCall(CB, Func, ORE, TLI); + + if (Modified) + NumFunctionsInstrumented++; + return Modified; +} + +std::optional<LibFunc> +AllocToken::shouldInstrumentCall(const CallBase &CB, + const TargetLibraryInfo &TLI) const { + const Function *Callee = CB.getCalledFunction(); + if (!Callee) + return std::nullopt; + + // Ignore nobuiltin of the CallBase, so that we can cover nobuiltin libcalls + // if requested via isInstrumentableLibFunc(). Note that isAllocationFn() is + // returning false for nobuiltin calls. + LibFunc Func; + if (TLI.getLibFunc(*Callee, Func)) { + if (isInstrumentableLibFunc(Func, CB, TLI)) + return Func; + } else if (Options.Extended && getAllocTokenMetadata(CB)) { + return NotLibFunc; + } + + return std::nullopt; +} + +bool AllocToken::isInstrumentableLibFunc(LibFunc Func, const CallBase &CB, + const TargetLibraryInfo &TLI) { + if (ignoreInstrumentableLibFunc(Func)) + return false; + + if (isAllocationFn(&CB, &TLI)) + return true; + + switch (Func) { + // These libfuncs don't return normal pointers, and are therefore not handled + // by isAllocationFn(). + case LibFunc_posix_memalign: + case LibFunc_size_returning_new: + case LibFunc_size_returning_new_hot_cold: + case LibFunc_size_returning_new_aligned: + case LibFunc_size_returning_new_aligned_hot_cold: + return true; + + // See comment above ClCoverReplaceableNew. + case LibFunc_Znwj: + case LibFunc_ZnwjRKSt9nothrow_t: + case LibFunc_ZnwjSt11align_val_t: + case LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t: + case LibFunc_Znwm: + case LibFunc_Znwm12__hot_cold_t: + case LibFunc_ZnwmRKSt9nothrow_t: + case LibFunc_ZnwmRKSt9nothrow_t12__hot_cold_t: + case LibFunc_ZnwmSt11align_val_t: + case LibFunc_ZnwmSt11align_val_t12__hot_cold_t: + case LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t: + case LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t: + case LibFunc_Znaj: + case LibFunc_ZnajRKSt9nothrow_t: + case LibFunc_ZnajSt11align_val_t: + case LibFunc_ZnajSt11align_val_tRKSt9nothrow_t: + case LibFunc_Znam: + case LibFunc_Znam12__hot_cold_t: + case LibFunc_ZnamRKSt9nothrow_t: + case LibFunc_ZnamRKSt9nothrow_t12__hot_cold_t: + case LibFunc_ZnamSt11align_val_t: + case LibFunc_ZnamSt11align_val_t12__hot_cold_t: + case LibFunc_ZnamSt11align_val_tRKSt9nothrow_t: + case LibFunc_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t: + return ClCoverReplaceableNew; + + default: + return false; + } +} + +bool AllocToken::ignoreInstrumentableLibFunc(LibFunc Func) { + switch (Func) { + case LibFunc_strdup: + case LibFunc_dunder_strdup: + case LibFunc_strndup: + case LibFunc_dunder_strndup: + return true; + default: + return false; + } +} + +bool AllocToken::replaceAllocationCall(CallBase *CB, LibFunc Func, + OptimizationRemarkEmitter &ORE, + const TargetLibraryInfo &TLI) { + uint64_t TokenID = getToken(*CB, ORE); + + FunctionCallee TokenAlloc = getTokenAllocFunction(*CB, TokenID, Func); + if (!TokenAlloc) + return false; + NumAllocationsInstrumented++; + + if (Options.FastABI) { + assert(TokenAlloc.getFunctionType()->getNumParams() == CB->arg_size()); + CB->setCalledFunction(TokenAlloc); + return true; + } + + IRBuilder<> IRB(CB); + // Original args. 
+ SmallVector<Value *, 4> NewArgs{CB->args()}; + // Add token ID, truncated to IntPtrTy width. + NewArgs.push_back(ConstantInt::get(IntPtrTy, TokenID)); + assert(TokenAlloc.getFunctionType()->getNumParams() == NewArgs.size()); + + // Preserve invoke vs call semantics for exception handling. + CallBase *NewCall; + if (auto *II = dyn_cast<InvokeInst>(CB)) { + NewCall = IRB.CreateInvoke(TokenAlloc, II->getNormalDest(), + II->getUnwindDest(), NewArgs); + } else { + NewCall = IRB.CreateCall(TokenAlloc, NewArgs); + cast<CallInst>(NewCall)->setTailCall(CB->isTailCall()); + } + NewCall->setCallingConv(CB->getCallingConv()); + NewCall->copyMetadata(*CB); + NewCall->setAttributes(CB->getAttributes()); + + // Replace all uses and delete the old call. + CB->replaceAllUsesWith(NewCall); + CB->eraseFromParent(); + return true; +} + +FunctionCallee AllocToken::getTokenAllocFunction(const CallBase &CB, + uint64_t TokenID, + LibFunc OriginalFunc) { + std::optional<std::pair<LibFunc, uint64_t>> Key; + if (OriginalFunc != NotLibFunc) { + Key = std::make_pair(OriginalFunc, Options.FastABI ? TokenID : 0); + auto It = TokenAllocFunctions.find(*Key); + if (It != TokenAllocFunctions.end()) + return It->second; + } + + const Function *Callee = CB.getCalledFunction(); + if (!Callee) + return FunctionCallee(); + const FunctionType *OldFTy = Callee->getFunctionType(); + if (OldFTy->isVarArg()) + return FunctionCallee(); + // Copy params, and append token ID type. + Type *RetTy = OldFTy->getReturnType(); + SmallVector<Type *, 4> NewParams{OldFTy->params()}; + std::string TokenAllocName = ClFuncPrefix; + if (Options.FastABI) + TokenAllocName += utostr(TokenID) + "_"; + else + NewParams.push_back(IntPtrTy); // token ID + TokenAllocName += Callee->getName(); + FunctionType *NewFTy = FunctionType::get(RetTy, NewParams, false); + FunctionCallee TokenAlloc = Mod.getOrInsertFunction(TokenAllocName, NewFTy); + if (Function *F = dyn_cast<Function>(TokenAlloc.getCallee())) + F->copyAttributesFrom(Callee); // preserve attrs + + if (Key.has_value()) + TokenAllocFunctions[*Key] = TokenAlloc; + return TokenAlloc; +} + +} // namespace + +AllocTokenPass::AllocTokenPass(AllocTokenOptions Opts) + : Options(std::move(Opts)) {} + +PreservedAnalyses AllocTokenPass::run(Module &M, ModuleAnalysisManager &MAM) { + AllocToken Pass(Options, M, MAM); + bool Modified = false; + + for (Function &F : M) { + if (F.empty()) + continue; // declaration + Modified |= Pass.instrumentFunction(F); + } + + return Modified ? 
PreservedAnalyses::none().preserveSet<CFGAnalyses>() + : PreservedAnalyses::all(); +} diff --git a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt index 15fd421..80576c6 100644 --- a/llvm/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/llvm/lib/Transforms/Instrumentation/CMakeLists.txt @@ -1,5 +1,6 @@ add_llvm_component_library(LLVMInstrumentation AddressSanitizer.cpp + AllocToken.cpp BoundsChecking.cpp CGProfile.cpp ControlHeightReduction.cpp diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp index 7968a5d..ae34b4e 100644 --- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp +++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp @@ -89,6 +89,7 @@ STATISTIC(NumTransforms, "Number of transformations done"); STATISTIC(NumCloned, "Number of blocks cloned"); STATISTIC(NumPaths, "Number of individual paths threaded"); +namespace llvm { static cl::opt<bool> ClViewCfgBefore("dfa-jump-view-cfg-before", cl::desc("View the CFG before DFA Jump Threading"), @@ -119,6 +120,7 @@ static cl::opt<unsigned> CostThreshold("dfa-cost-threshold", cl::desc("Maximum cost accepted for the transformation"), cl::Hidden, cl::init(50)); +} // namespace llvm static cl::opt<double> MaxClonedRate( "dfa-max-cloned-rate", @@ -127,7 +129,6 @@ static cl::opt<double> MaxClonedRate( cl::Hidden, cl::init(7.5)); namespace { - class SelectInstToUnfold { SelectInst *SI; PHINode *SIUse; @@ -141,10 +142,6 @@ public: explicit operator bool() const { return SI && SIUse; } }; -void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, - std::vector<SelectInstToUnfold> *NewSIsToUnfold, - std::vector<BasicBlock *> *NewBBs); - class DFAJumpThreading { public: DFAJumpThreading(AssumptionCache *AC, DominatorTree *DT, LoopInfo *LI, @@ -158,7 +155,8 @@ private: void unfoldSelectInstrs(DominatorTree *DT, const SmallVector<SelectInstToUnfold, 4> &SelectInsts) { - DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); + // TODO: Have everything use a single lazy DTU + DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); SmallVector<SelectInstToUnfold, 4> Stack(SelectInsts); while (!Stack.empty()) { @@ -173,16 +171,18 @@ private: } } + static void unfold(DomTreeUpdater *DTU, LoopInfo *LI, + SelectInstToUnfold SIToUnfold, + std::vector<SelectInstToUnfold> *NewSIsToUnfold, + std::vector<BasicBlock *> *NewBBs); + AssumptionCache *AC; DominatorTree *DT; LoopInfo *LI; TargetTransformInfo *TTI; OptimizationRemarkEmitter *ORE; }; - -} // end anonymous namespace - -namespace { +} // namespace /// Unfold the select instruction held in \p SIToUnfold by replacing it with /// control flow. @@ -191,9 +191,10 @@ namespace { /// created basic blocks into \p NewBBs. /// /// TODO: merge it with CodeGenPrepare::optimizeSelectInst() if possible. 
-void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, - std::vector<SelectInstToUnfold> *NewSIsToUnfold, - std::vector<BasicBlock *> *NewBBs) { +void DFAJumpThreading::unfold(DomTreeUpdater *DTU, LoopInfo *LI, + SelectInstToUnfold SIToUnfold, + std::vector<SelectInstToUnfold> *NewSIsToUnfold, + std::vector<BasicBlock *> *NewBBs) { SelectInst *SI = SIToUnfold.getInst(); PHINode *SIUse = SIToUnfold.getUse(); assert(SI->hasOneUse()); @@ -348,10 +349,12 @@ void unfold(DomTreeUpdater *DTU, LoopInfo *LI, SelectInstToUnfold SIToUnfold, SI->eraseFromParent(); } +namespace { struct ClonedBlock { BasicBlock *BB; APInt State; ///< \p State corresponds to the next value of a switch stmnt. }; +} // namespace typedef std::deque<BasicBlock *> PathType; typedef std::vector<PathType> PathsType; @@ -381,6 +384,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const PathType &Path) { return OS; } +namespace { /// ThreadingPath is a path in the control flow of a loop that can be threaded /// by cloning necessary basic blocks and replacing conditional branches with /// unconditional ones. A threading path includes a list of basic blocks, the @@ -820,11 +824,13 @@ struct TransformDFA { : SwitchPaths(SwitchPaths), DT(DT), AC(AC), TTI(TTI), ORE(ORE), EphValues(EphValues) {} - void run() { + bool run() { if (isLegalAndProfitableToTransform()) { createAllExitPaths(); NumTransforms++; + return true; } + return false; } private: @@ -975,8 +981,6 @@ private: /// Transform each threading path to effectively jump thread the DFA. void createAllExitPaths() { - DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Eager); - // Move the switch block to the end of the path, since it will be duplicated BasicBlock *SwitchBlock = SwitchPaths->getSwitchBlock(); for (ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) { @@ -993,15 +997,18 @@ private: SmallPtrSet<BasicBlock *, 16> BlocksToClean; BlocksToClean.insert_range(successors(SwitchBlock)); - for (const ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) { - createExitPath(NewDefs, TPath, DuplicateMap, BlocksToClean, &DTU); - NumPaths++; - } + { + DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy); + for (const ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) { + createExitPath(NewDefs, TPath, DuplicateMap, BlocksToClean, &DTU); + NumPaths++; + } - // After all paths are cloned, now update the last successor of the cloned - // path so it skips over the switch statement - for (const ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) - updateLastSuccessor(TPath, DuplicateMap, &DTU); + // After all paths are cloned, now update the last successor of the cloned + // path so it skips over the switch statement + for (const ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) + updateLastSuccessor(TPath, DuplicateMap, &DTU); + } // For each instruction that was cloned and used outside, update its uses updateSSA(NewDefs); @@ -1360,6 +1367,7 @@ private: SmallPtrSet<const Value *, 32> EphValues; std::vector<ThreadingPath> TPaths; }; +} // namespace bool DFAJumpThreading::run(Function &F) { LLVM_DEBUG(dbgs() << "\nDFA Jump threading: " << F.getName() << "\n"); @@ -1426,9 +1434,8 @@ bool DFAJumpThreading::run(Function &F) { for (AllSwitchPaths SwitchPaths : ThreadableLoops) { TransformDFA Transform(&SwitchPaths, DT, AC, TTI, ORE, EphValues); - Transform.run(); - MadeChanges = true; - LoopInfoBroken = true; + if (Transform.run()) + MadeChanges = LoopInfoBroken = true; } #ifdef EXPENSIVE_CHECKS @@ -1439,8 +1446,6 @@ bool 
DFAJumpThreading::run(Function &F) { return MadeChanges; } -} // end anonymous namespace - /// Integrate with the new Pass Manager PreservedAnalyses DFAJumpThreadingPass::run(Function &F, FunctionAnalysisManager &AM) { diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp index bbd1ed6..5ba6f95f 100644 --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -970,6 +970,7 @@ Function *CodeExtractor::constructFunctionDeclaration( case Attribute::SanitizeMemTag: case Attribute::SanitizeRealtime: case Attribute::SanitizeRealtimeBlocking: + case Attribute::SanitizeAllocToken: case Attribute::SpeculativeLoadHardening: case Attribute::StackProtect: case Attribute::StackProtectReq: diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 21b2652..b6ca52e 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3031,6 +3031,13 @@ static void combineMetadata(Instruction *K, const Instruction *J, K->getContext(), MDNode::toCaptureComponents(JMD) | MDNode::toCaptureComponents(KMD))); break; + case LLVMContext::MD_alloc_token: + // Preserve !alloc_token if both K and J have it, and they are equal. + if (KMD == JMD) + K->setMetadata(Kind, JMD); + else + K->setMetadata(Kind, nullptr); + break; } } // Set !invariant.group from J if J has it. If both instructions have it diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp index bf882d7..6312831 100644 --- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp +++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp @@ -201,18 +201,27 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count, /// unroll count is non-zero. /// /// This function performs the following: -/// - Update PHI nodes at the unrolling loop exit and epilog loop exit -/// - Create PHI nodes at the unrolling loop exit to combine -/// values that exit the unrolling loop code and jump around it. +/// - Update PHI nodes at the epilog loop exit +/// - Create PHI nodes at the unrolling loop exit and epilog preheader to +/// combine values that exit the unrolling loop code and jump around it. /// - Update PHI operands in the epilog loop by the new PHI nodes -/// - Branch around the epilog loop if extra iters (ModVal) is zero. +/// - At the unrolling loop exit, branch around the epilog loop if extra iters +// (ModVal) is zero. +/// - At the epilog preheader, add an llvm.assume call that extra iters is +/// non-zero. If the unrolling loop exit is the predecessor, the above new +/// branch guarantees that assumption. If the unrolling loop preheader is the +/// predecessor, then the required first iteration from the original loop has +/// yet to be executed, so it must be executed in the epilog loop. If we +/// later unroll the epilog loop, that llvm.assume call somehow enables +/// ScalarEvolution to compute a epilog loop maximum trip count, which enables +/// eliminating the branch at the end of the final unrolled epilog iteration. 
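A minimal sketch of what this inserts at the top of the epilog preheader (block and value names are illustrative):

  epil.preheader:
    %lcmp.mod = icmp ne i64 %xtraiter, 0
    call void @llvm.assume(i1 %lcmp.mod)
    ; ... first iteration of the epilog (remainder) loop follows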
/// static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, BasicBlock *Exit, BasicBlock *PreHeader, BasicBlock *EpilogPreHeader, BasicBlock *NewPreHeader, ValueToValueMapTy &VMap, DominatorTree *DT, LoopInfo *LI, bool PreserveLCSSA, ScalarEvolution &SE, - unsigned Count) { + unsigned Count, AssumptionCache &AC) { BasicBlock *Latch = L->getLoopLatch(); assert(Latch && "Loop must have a latch"); BasicBlock *EpilogLatch = cast<BasicBlock>(VMap[Latch]); @@ -231,7 +240,7 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, // EpilogLatch // Exit (EpilogPN) - // Update PHI nodes at NewExit and Exit. + // Update PHI nodes at Exit. for (PHINode &PN : NewExit->phis()) { // PN should be used in another PHI located in Exit block as // Exit was split by SplitBlockPredecessors into Exit and NewExit @@ -246,15 +255,11 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, // epilogue edges have already been added. // // There is EpilogPreHeader incoming block instead of NewExit as - // NewExit was spilt 1 more time to get EpilogPreHeader. + // NewExit was split 1 more time to get EpilogPreHeader. assert(PN.hasOneUse() && "The phi should have 1 use"); PHINode *EpilogPN = cast<PHINode>(PN.use_begin()->getUser()); assert(EpilogPN->getParent() == Exit && "EpilogPN should be in Exit block"); - // Add incoming PreHeader from branch around the Loop - PN.addIncoming(PoisonValue::get(PN.getType()), PreHeader); - SE.forgetValue(&PN); - Value *V = PN.getIncomingValueForBlock(Latch); Instruction *I = dyn_cast<Instruction>(V); if (I && L->contains(I)) @@ -271,35 +276,52 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, NewExit); // Now PHIs should look like: // NewExit: - // PN = PHI [I, Latch], [poison, PreHeader] + // PN = PHI [I, Latch] // ... // Exit: // EpilogPN = PHI [PN, NewExit], [VMap[I], EpilogLatch] } - // Create PHI nodes at NewExit (from the unrolling loop Latch and PreHeader). - // Update corresponding PHI nodes in epilog loop. + // Create PHI nodes at NewExit (from the unrolling loop Latch) and at + // EpilogPreHeader (from PreHeader and NewExit). Update corresponding PHI + // nodes in epilog loop. for (BasicBlock *Succ : successors(Latch)) { // Skip this as we already updated phis in exit blocks. if (!L->contains(Succ)) continue; + + // Succ here appears to always be just L->getHeader(). Otherwise, how do we + // know its corresponding epilog block (from VMap) is EpilogHeader and thus + // EpilogPreHeader is the right incoming block for VPN, as set below? + // TODO: Can we thus avoid the enclosing loop over successors? + assert(Succ == L->getHeader() && + "Expect the only in-loop successor of latch to be the loop header"); + for (PHINode &PN : Succ->phis()) { - // Add new PHI nodes to the loop exit block and update epilog - // PHIs with the new PHI values. - PHINode *NewPN = PHINode::Create(PN.getType(), 2, PN.getName() + ".unr"); - NewPN->insertBefore(NewExit->getFirstNonPHIIt()); - // Adding a value to the new PHI node from the unrolling loop preheader. - NewPN->addIncoming(PN.getIncomingValueForBlock(NewPreHeader), PreHeader); - // Adding a value to the new PHI node from the unrolling loop latch. - NewPN->addIncoming(PN.getIncomingValueForBlock(Latch), Latch); + // Add new PHI nodes to the loop exit block. 
+ PHINode *NewPN0 = PHINode::Create(PN.getType(), /*NumReservedValues=*/1, + PN.getName() + ".unr"); + NewPN0->insertBefore(NewExit->getFirstNonPHIIt()); + // Add value to the new PHI node from the unrolling loop latch. + NewPN0->addIncoming(PN.getIncomingValueForBlock(Latch), Latch); + + // Add new PHI nodes to EpilogPreHeader. + PHINode *NewPN1 = PHINode::Create(PN.getType(), /*NumReservedValues=*/2, + PN.getName() + ".epil.init"); + NewPN1->insertBefore(EpilogPreHeader->getFirstNonPHIIt()); + // Add value to the new PHI node from the unrolling loop preheader. + NewPN1->addIncoming(PN.getIncomingValueForBlock(NewPreHeader), PreHeader); + // Add value to the new PHI node from the epilog loop guard. + NewPN1->addIncoming(NewPN0, NewExit); // Update the existing PHI node operand with the value from the new PHI // node. Corresponding instruction in epilog loop should be PHI. PHINode *VPN = cast<PHINode>(VMap[&PN]); - VPN->setIncomingValueForBlock(EpilogPreHeader, NewPN); + VPN->setIncomingValueForBlock(EpilogPreHeader, NewPN1); } } + // In NewExit, branch around the epilog loop if no extra iters. Instruction *InsertPt = NewExit->getTerminator(); IRBuilder<> B(InsertPt); Value *BrLoopExit = B.CreateIsNotNull(ModVal, "lcmp.mod"); @@ -308,7 +330,7 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, SmallVector<BasicBlock*, 4> Preds(predecessors(Exit)); SplitBlockPredecessors(Exit, Preds, ".epilog-lcssa", DT, LI, nullptr, PreserveLCSSA); - // Add the branch to the exit block (around the unrolling loop) + // Add the branch to the exit block (around the epilog loop) MDNode *BranchWeights = nullptr; if (hasBranchWeightMD(*Latch->getTerminator())) { // Assume equal distribution in interval [0, Count). @@ -322,10 +344,11 @@ static void ConnectEpilog(Loop *L, Value *ModVal, BasicBlock *NewExit, DT->changeImmediateDominator(Exit, NewDom); } - // Split the main loop exit to maintain canonicalization guarantees. - SmallVector<BasicBlock*, 4> NewExitPreds{Latch}; - SplitBlockPredecessors(NewExit, NewExitPreds, ".loopexit", DT, LI, nullptr, - PreserveLCSSA); + // In EpilogPreHeader, assume extra iters is non-zero. + IRBuilder<> B2(EpilogPreHeader, EpilogPreHeader->getFirstNonPHIIt()); + Value *ModIsNotNull = B2.CreateIsNotNull(ModVal, "lcmp.mod"); + AssumeInst *AI = cast<AssumeInst>(B2.CreateAssumption(ModIsNotNull)); + AC.registerAssumption(AI); } /// Create a clone of the blocks in a loop and connect them together. A new @@ -795,7 +818,8 @@ bool llvm::UnrollRuntimeLoopRemainder( ConstantInt::get(BECount->getType(), Count - 1)) : B.CreateIsNotNull(ModVal, "lcmp.mod"); - BasicBlock *RemainderLoop = UseEpilogRemainder ? NewExit : PrologPreHeader; + BasicBlock *RemainderLoop = + UseEpilogRemainder ? EpilogPreHeader : PrologPreHeader; BasicBlock *UnrollingLoop = UseEpilogRemainder ? NewPreHeader : PrologExit; // Branch to either remainder (extra iterations) loop or unrolling loop. MDNode *BranchWeights = nullptr; @@ -808,7 +832,7 @@ bool llvm::UnrollRuntimeLoopRemainder( PreHeaderBR->eraseFromParent(); if (DT) { if (UseEpilogRemainder) - DT->changeImmediateDominator(NewExit, PreHeader); + DT->changeImmediateDominator(EpilogPreHeader, PreHeader); else DT->changeImmediateDominator(PrologExit, PreHeader); } @@ -880,7 +904,8 @@ bool llvm::UnrollRuntimeLoopRemainder( // from both the original loop and the remainder code reaching the exit // blocks. 
While the IDom of these exit blocks were from the original loop, // now the IDom is the preheader (which decides whether the original loop or - // remainder code should run). + // remainder code should run) unless the block still has just the original + // predecessor (such as NewExit in the case of an epilog remainder). if (DT && !L->getExitingBlock()) { SmallVector<BasicBlock *, 16> ChildrenToUpdate; // NB! We have to examine the dom children of all loop blocks, not just @@ -891,7 +916,8 @@ bool llvm::UnrollRuntimeLoopRemainder( auto *DomNodeBB = DT->getNode(BB); for (auto *DomChild : DomNodeBB->children()) { auto *DomChildBB = DomChild->getBlock(); - if (!L->contains(LI->getLoopFor(DomChildBB))) + if (!L->contains(LI->getLoopFor(DomChildBB)) && + DomChildBB->getUniquePredecessor() != BB) ChildrenToUpdate.push_back(DomChildBB); } } @@ -930,7 +956,7 @@ bool llvm::UnrollRuntimeLoopRemainder( // Connect the epilog code to the original loop and update the // PHI functions. ConnectEpilog(L, ModVal, NewExit, LatchExit, PreHeader, EpilogPreHeader, - NewPreHeader, VMap, DT, LI, PreserveLCSSA, *SE, Count); + NewPreHeader, VMap, DT, LI, PreserveLCSSA, *SE, Count, *AC); // Update counter in loop for unrolling. // Use an incrementing IV. Pre-incr/post-incr is backedge/trip count. diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index c167dd7..fb696be 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2263,8 +2263,7 @@ public: /// debug location \p DL. VPWidenPHIRecipe(PHINode *Phi, VPValue *Start = nullptr, DebugLoc DL = DebugLoc::getUnknown(), const Twine &Name = "") - : VPSingleDefRecipe(VPDef::VPWidenPHISC, ArrayRef<VPValue *>(), Phi, DL), - Name(Name.str()) { + : VPSingleDefRecipe(VPDef::VPWidenPHISC, {}, Phi, DL), Name(Name.str()) { if (Start) addOperand(Start); } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index ebf833e..c8a2d84 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -3180,9 +3180,8 @@ expandVPWidenIntOrFpInduction(VPWidenIntOrFpInductionRecipe *WidenIVR, DebugLoc::getUnknown(), "induction"); // Create the widened phi of the vector IV. - auto *WidePHI = new VPWidenPHIRecipe(WidenIVR->getPHINode(), nullptr, + auto *WidePHI = new VPWidenPHIRecipe(WidenIVR->getPHINode(), Init, WidenIVR->getDebugLoc(), "vec.ind"); - WidePHI->addOperand(Init); WidePHI->insertBefore(WidenIVR); // Create the backedge value for the vector IV. @@ -3545,8 +3544,7 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red, VPValue *A, *B; VPValue *Tmp = nullptr; // Sub reductions could have a sub between the add reduction and vec op. 
- if (match(VecOp, - m_Binary<Instruction::Sub>(m_SpecificInt(0), m_VPValue(Tmp)))) { + if (match(VecOp, m_Sub(m_ZeroInt(), m_VPValue(Tmp)))) { Sub = VecOp->getDefiningRecipe(); VecOp = Tmp; } diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index 0599930..66748c5 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -71,8 +71,8 @@ bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) { m_Specific(&Plan.getVF()))) || IsWideCanonicalIV(A)); - return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) && - IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount(); + return match(V, m_ICmp(m_VPValue(A), m_VPValue(B))) && IsWideCanonicalIV(A) && + B == Plan.getOrCreateBackedgeTakenCount(); } const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) { diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt index 8399292..d877f0b 100644 --- a/llvm/runtimes/CMakeLists.txt +++ b/llvm/runtimes/CMakeLists.txt @@ -473,7 +473,6 @@ if(build_runtimes) if(LLVM_INCLUDE_TESTS) foreach(dep FileCheck clang - clang-offload-packager flang count lld @@ -489,6 +488,7 @@ if(build_runtimes) llvm-size llvm-symbolizer llvm-xray + llvm-offload-binary not obj2yaml opt @@ -548,7 +548,7 @@ if(build_runtimes) # that all .mod files are also properly build. list(APPEND extra_deps "flang" "module_files") endif() - foreach(dep opt llvm-link llvm-extract clang clang-offload-packager clang-nvlink-wrapper) + foreach(dep opt llvm-link llvm-extract clang llvm-offload-binary clang-nvlink-wrapper) if(TARGET ${dep}) list(APPEND extra_deps ${dep}) endif() @@ -556,8 +556,8 @@ if(build_runtimes) endif() if(LLVM_LIBC_GPU_BUILD) list(APPEND extra_cmake_args "-DLLVM_LIBC_GPU_BUILD=ON") - if(TARGET clang-offload-packager) - list(APPEND extra_deps clang-offload-packager) + if(TARGET llvm-offload-binary) + list(APPEND extra_deps llvm-offload-binary) endif() if(TARGET clang-nvlink-wrapper) list(APPEND extra_deps clang-nvlink-wrapper) diff --git a/llvm/test/Assembler/ConstantExprFold.ll b/llvm/test/Assembler/ConstantExprFold.ll index 840ed06..33ee492 100644 --- a/llvm/test/Assembler/ConstantExprFold.ll +++ b/llvm/test/Assembler/ConstantExprFold.ll @@ -30,9 +30,9 @@ ; Need a function to make update_test_checks.py work. ;. 
; CHECK: @A = global i64 0 -; CHECK: @add = global ptr @A -; CHECK: @sub = global ptr @A -; CHECK: @xor = global ptr @A +; CHECK: @add = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr) +; CHECK: @sub = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr) +; CHECK: @xor = global ptr inttoptr (i64 ptrtoint (ptr @A to i64) to ptr) ; CHECK: @B = external global %Ty ; CHECK: @cons = weak global i32 0, align 8 ; CHECK: @gep1 = global <2 x ptr> undef diff --git a/llvm/test/Bitcode/attributes.ll b/llvm/test/Bitcode/attributes.ll index 8c1a763..aef7810 100644 --- a/llvm/test/Bitcode/attributes.ll +++ b/llvm/test/Bitcode/attributes.ll @@ -516,6 +516,11 @@ define void @f93() sanitize_realtime_blocking { ret void; } +; CHECK: define void @f_sanitize_alloc_token() #55 +define void @f_sanitize_alloc_token() sanitize_alloc_token { + ret void; +} + ; CHECK: define void @f87() [[FNRETTHUNKEXTERN:#[0-9]+]] define void @f87() fn_ret_thunk_extern { ret void } @@ -627,6 +632,7 @@ define void @dead_on_return(ptr dead_on_return %p) { ; CHECK: attributes #52 = { nosanitize_bounds } ; CHECK: attributes #53 = { sanitize_realtime } ; CHECK: attributes #54 = { sanitize_realtime_blocking } +; CHECK: attributes #55 = { sanitize_alloc_token } ; CHECK: attributes [[FNRETTHUNKEXTERN]] = { fn_ret_thunk_extern } ; CHECK: attributes [[SKIPPROFILE]] = { skipprofile } ; CHECK: attributes [[OPTDEBUG]] = { optdebug } diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll index 0b5ce08..e21786e 100644 --- a/llvm/test/Bitcode/compatibility.ll +++ b/llvm/test/Bitcode/compatibility.ll @@ -1718,7 +1718,7 @@ exit: ; CHECK: select <2 x i1> <i1 true, i1 false>, <2 x i8> <i8 2, i8 3>, <2 x i8> <i8 3, i8 2> call void @f.nobuiltin() builtin - ; CHECK: call void @f.nobuiltin() #54 + ; CHECK: call void @f.nobuiltin() #55 call fastcc noalias ptr @f.noalias() noinline ; CHECK: call fastcc noalias ptr @f.noalias() #12 @@ -2151,6 +2151,9 @@ declare void @f.sanitize_realtime() sanitize_realtime declare void @f.sanitize_realtime_blocking() sanitize_realtime_blocking ; CHECK: declare void @f.sanitize_realtime_blocking() #53 +declare void @f.sanitize_alloc_token() sanitize_alloc_token +; CHECK: declare void @f.sanitize_alloc_token() #54 + ; CHECK: declare nofpclass(snan) float @nofpclass_snan(float nofpclass(snan)) declare nofpclass(snan) float @nofpclass_snan(float nofpclass(snan)) @@ -2284,7 +2287,8 @@ define float @nofpclass_callsites(float %arg, { float } %arg1) { ; CHECK: attributes #51 = { sanitize_numerical_stability } ; CHECK: attributes #52 = { sanitize_realtime } ; CHECK: attributes #53 = { sanitize_realtime_blocking } -; CHECK: attributes #54 = { builtin } +; CHECK: attributes #54 = { sanitize_alloc_token } +; CHECK: attributes #55 = { builtin } ;; Metadata diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index e810fcb6..f01422e 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -123,6 +123,7 @@ set(LLVM_TEST_DEPENDS llvm-objdump llvm-opt-fuzzer llvm-opt-report + llvm-offload-binary llvm-offload-wrapper llvm-otool llvm-pdbutil diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll index ab51693..05d3e9c3 100644 --- a/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll @@ -497,12 +497,10 @@ define amdgpu_kernel void @test_fold_canonicalize_minnum_value_f32(ptr addrspace ret void } -; FIXME: Should there be more checks here? 
minnum with NaN operand is simplified away. +; FIXME: Should there be more checks here? minnum with sNaN operand is simplified to qNaN. ; GCN-LABEL: test_fold_canonicalize_sNaN_value_f32: -; GCN: {{flat|global}}_load_dword [[LOAD:v[0-9]+]] -; VI: v_mul_f32_e32 v{{[0-9]+}}, 1.0, [[LOAD]] -; GFX9: v_max_f32_e32 v{{[0-9]+}}, [[LOAD]], [[LOAD]] +; GCN: v_mov_b32_e32 v{{.+}}, 0x7fc00000 define amdgpu_kernel void @test_fold_canonicalize_sNaN_value_f32(ptr addrspace(1) %arg) { %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds float, ptr addrspace(1) %arg, i32 %id diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll index 3de6df2..833be20 100644 --- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll +++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll @@ -1949,8 +1949,7 @@ define float @v_fneg_self_minimumnum_f32_ieee(float %a) #0 { ; GCN-LABEL: v_fneg_self_minimumnum_f32_ieee: ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_mul_f32_e32 v0, -1.0, v0 -; GCN-NEXT: v_max_f32_e32 v0, v0, v0 +; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] %min = call float @llvm.minimumnum.f32(float %a, float %a) %min.fneg = fneg float %min @@ -1961,7 +1960,7 @@ define float @v_fneg_self_minimumnum_f32_no_ieee(float %a) #4 { ; GCN-LABEL: v_fneg_self_minimumnum_f32_no_ieee: ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_max_f32_e64 v0, -v0, -v0 +; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] %min = call float @llvm.minimumnum.f32(float %a, float %a) %min.fneg = fneg float %min @@ -2285,8 +2284,7 @@ define float @v_fneg_self_maximumnum_f32_ieee(float %a) #0 { ; GCN-LABEL: v_fneg_self_maximumnum_f32_ieee: ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_mul_f32_e32 v0, -1.0, v0 -; GCN-NEXT: v_min_f32_e32 v0, v0, v0 +; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] %max = call float @llvm.maximumnum.f32(float %a, float %a) %max.fneg = fneg float %max @@ -2297,7 +2295,7 @@ define float @v_fneg_self_maximumnum_f32_no_ieee(float %a) #4 { ; GCN-LABEL: v_fneg_self_maximumnum_f32_no_ieee: ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_min_f32_e64 v0, -v0, -v0 +; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0 ; GCN-NEXT: s_setpc_b64 s[30:31] %max = call float @llvm.maximumnum.f32(float %a, float %a) %max.fneg = fneg float %max diff --git a/llvm/test/DebugInfo/KeyInstructions/Generic/loop-unroll-runtime.ll b/llvm/test/DebugInfo/KeyInstructions/Generic/loop-unroll-runtime.ll index d23afae..abcc566 100644 --- a/llvm/test/DebugInfo/KeyInstructions/Generic/loop-unroll-runtime.ll +++ b/llvm/test/DebugInfo/KeyInstructions/Generic/loop-unroll-runtime.ll @@ -5,7 +5,7 @@ ;; Check atoms are remapped for runtime unrolling. 
; CHECK: for.body.epil: -; CHECK-NEXT: store i64 %indvars.iv.unr, ptr %p, align 4, !dbg [[G2R1:!.*]] +; CHECK-NEXT: store i64 %indvars.iv.epil.init, ptr %p, align 4, !dbg [[G2R1:!.*]] ; CHECK: for.body.epil.1: ; CHECK-NEXT: store i64 %indvars.iv.next.epil, ptr %p, align 4, !dbg [[G3R1:!.*]] diff --git a/llvm/test/Instrumentation/AllocToken/basic.ll b/llvm/test/Instrumentation/AllocToken/basic.ll new file mode 100644 index 0000000..099d37d --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/basic.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare ptr @malloc(i64) +declare ptr @calloc(i64, i64) +declare ptr @realloc(ptr, i64) +declare ptr @_Znwm(i64) +declare ptr @_Znam(i64) +declare void @free(ptr) +declare void @_ZdlPv(ptr) +declare i32 @foobar(i64) + +; Test basic allocation call rewriting +define ptr @test_basic_rewriting() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_basic_rewriting( +; CHECK-SAME: ) #[[ATTR5:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 64, i64 0) +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__alloc_token_calloc(i64 8, i64 8, i64 1) +; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__alloc_token_realloc(ptr [[TMP0]], i64 128, i64 2) +; CHECK-NEXT: ret ptr [[TMP2]] +; +entry: + %ptr1 = call ptr @malloc(i64 64) + %ptr2 = call ptr @calloc(i64 8, i64 8) + %ptr3 = call ptr @realloc(ptr %ptr1, i64 128) + ret ptr %ptr3 +} + +; Test C++ operator rewriting +define ptr @test_cpp_operators() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_cpp_operators( +; CHECK-SAME: ) #[[ATTR5]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token__Znwm(i64 32, i64 3) +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__alloc_token__Znam(i64 64, i64 4) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @_Znwm(i64 32) + %ptr2 = call ptr @_Znam(i64 64) + ret ptr %ptr1 +} + +; Functions without sanitize_alloc_token do not get instrumented +define ptr @without_attribute() { +; CHECK-LABEL: define ptr @without_attribute() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = call ptr @malloc(i64 16) +; CHECK-NEXT: ret ptr [[PTR]] +; +entry: + %ptr = call ptr @malloc(i64 16) + ret ptr %ptr +} + +; Test that free/delete are untouched +define void @test_free_untouched(ptr %ptr) sanitize_alloc_token { +; CHECK-LABEL: define void @test_free_untouched( +; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR5]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @free(ptr [[PTR]]) +; CHECK-NEXT: call void @_ZdlPv(ptr [[PTR]]) +; CHECK-NEXT: ret void +; +entry: + call void @free(ptr %ptr) + call void @_ZdlPv(ptr %ptr) + ret void +} + +; Non-allocation functions are untouched +define i32 @no_allocations(i32 %x) sanitize_alloc_token { +; CHECK-LABEL: define i32 @no_allocations( +; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR5]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[RESULT:%.*]] = call i32 @foobar(i64 42) +; CHECK-NEXT: ret i32 [[RESULT]] +; +entry: + %result = call i32 @foobar(i64 42) + ret i32 %result +} + +; Test that tail calls are preserved +define ptr @test_tail_call_preserved() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_tail_call_preserved( +; CHECK-SAME: ) #[[ATTR5]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = tail 
call ptr @__alloc_token_malloc(i64 42, i64 5) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %result = tail call ptr @malloc(i64 42) + ret ptr %result +} diff --git a/llvm/test/Instrumentation/AllocToken/basic32.ll b/llvm/test/Instrumentation/AllocToken/basic32.ll new file mode 100644 index 0000000..944a452 --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/basic32.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128" + +declare ptr @malloc(i32) +declare ptr @_Znwm(i32) + +define ptr @test_basic_rewriting() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_basic_rewriting( +; CHECK-SAME: ) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i32 64, i32 0) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @malloc(i32 64) + ret ptr %ptr1 +} + +define ptr @test_cpp_operators() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_cpp_operators( +; CHECK-SAME: ) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token__Znwm(i32 32, i32 1) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @_Znwm(i32 32) + ret ptr %ptr1 +} diff --git a/llvm/test/Instrumentation/AllocToken/extralibfuncs.ll b/llvm/test/Instrumentation/AllocToken/extralibfuncs.ll new file mode 100644 index 0000000..5f08552 --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/extralibfuncs.ll @@ -0,0 +1,44 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; Test for special libfuncs not automatically considered allocation functions. +; +; RUN: opt < %s -passes=inferattrs,alloc-token -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare {ptr, i64} @__size_returning_new(i64) + +define ptr @test_extra_libfuncs() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_extra_libfuncs( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { ptr, i64 } @__alloc_token___size_returning_new(i64 10, i64 2689373973731826898), !alloc_token [[META0:![0-9]+]] +; CHECK-NEXT: [[PTR1:%.*]] = extractvalue { ptr, i64 } [[TMP0]], 0 +; CHECK-NEXT: ret ptr [[PTR1]] +; +entry: + %srn = call {ptr, i64} @__size_returning_new(i64 10), !alloc_token !0 + %ptr1 = extractvalue {ptr, i64} %srn, 0 + ret ptr %ptr1 +} + +declare ptr @_Znwm(i64) nobuiltin allocsize(0) +declare ptr @_Znam(i64) nobuiltin allocsize(0) + +define ptr @test_replaceable_new() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_replaceable_new( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token__Znwm(i64 32, i64 2689373973731826898), !alloc_token [[META0]] +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__alloc_token__Znam(i64 64, i64 2689373973731826898), !alloc_token [[META0]] +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @_Znwm(i64 32), !alloc_token !0 + %ptr2 = call ptr @_Znam(i64 64), !alloc_token !0 + ret ptr %ptr1 +} + +!0 = !{!"int"} +;. +; CHECK: [[META0]] = !{!"int"} +;. 
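In the default typehash mode used by the tests above, the token operand is derived from the !alloc_token string: the pass hashes the MDString with the stable SipHash and reduces it modulo the maximum token count (unbounded by default). An illustrative rewrite under that mode (the hash of "int" is the value checked above):

  %ptr = call ptr @malloc(i64 64), !alloc_token !0
  ; becomes:
  %ptr = call ptr @__alloc_token_malloc(i64 64, i64 2689373973731826898), !alloc_token !0

  !0 = !{!"int"}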
diff --git a/llvm/test/Instrumentation/AllocToken/fast.ll b/llvm/test/Instrumentation/AllocToken/fast.ll new file mode 100644 index 0000000..19a3ef6 --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/fast.ll @@ -0,0 +1,42 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -alloc-token-fast-abi -alloc-token-max=3 -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare ptr @malloc(i64) +declare ptr @calloc(i64, i64) +declare ptr @realloc(ptr, i64) +declare ptr @_Znwm(i64) +declare ptr @_Znam(i64) + +; Test basic allocation call rewriting +define ptr @test_basic_rewriting() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_basic_rewriting( +; CHECK-SAME: ) #[[ATTR4:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR1:%.*]] = call ptr @__alloc_token_0_malloc(i64 64) +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @__alloc_token_1_calloc(i64 8, i64 8) +; CHECK-NEXT: [[PTR3:%.*]] = call ptr @__alloc_token_2_realloc(ptr [[PTR1]], i64 128) +; CHECK-NEXT: ret ptr [[PTR3]] +; +entry: + %ptr1 = call ptr @malloc(i64 64) + %ptr2 = call ptr @calloc(i64 8, i64 8) + %ptr3 = call ptr @realloc(ptr %ptr1, i64 128) + ret ptr %ptr3 +} + +; Test C++ operator rewriting +define ptr @test_cpp_operators() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_cpp_operators( +; CHECK-SAME: ) #[[ATTR4]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR1:%.*]] = call ptr @__alloc_token_0__Znwm(i64 32) +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @__alloc_token_1__Znam(i64 64) +; CHECK-NEXT: ret ptr [[PTR1]] +; +entry: + %ptr1 = call ptr @_Znwm(i64 32) + %ptr2 = call ptr @_Znam(i64 64) + ret ptr %ptr1 +} diff --git a/llvm/test/Instrumentation/AllocToken/ignore.ll b/llvm/test/Instrumentation/AllocToken/ignore.ll new file mode 100644 index 0000000..b92a920 --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/ignore.ll @@ -0,0 +1,29 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; Test for all allocation functions that should be ignored by default. 
+; +; RUN: opt < %s -passes=inferattrs,alloc-token -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare ptr @strdup(ptr) +declare ptr @__strdup(ptr) +declare ptr @strndup(ptr, i64) +declare ptr @__strndup(ptr, i64) + +define ptr @test_ignored_allocation_functions(ptr %ptr) sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_ignored_allocation_functions( +; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR1:%.*]] = call ptr @strdup(ptr [[PTR]]) +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @__strdup(ptr [[PTR]]) +; CHECK-NEXT: [[PTR3:%.*]] = call ptr @strndup(ptr [[PTR]], i64 42) +; CHECK-NEXT: [[PTR4:%.*]] = call ptr @__strndup(ptr [[PTR]], i64 42) +; CHECK-NEXT: ret ptr [[PTR1]] +; +entry: + %ptr1 = call ptr @strdup(ptr %ptr) + %ptr2 = call ptr @__strdup(ptr %ptr) + %ptr3 = call ptr @strndup(ptr %ptr, i64 42) + %ptr4 = call ptr @__strndup(ptr %ptr, i64 42) + ret ptr %ptr1 +} diff --git a/llvm/test/Instrumentation/AllocToken/invoke.ll b/llvm/test/Instrumentation/AllocToken/invoke.ll new file mode 100644 index 0000000..347c99a --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/invoke.ll @@ -0,0 +1,123 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +define ptr @test_invoke_malloc() sanitize_alloc_token personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: define ptr @test_invoke_malloc( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = invoke ptr @__alloc_token_malloc(i64 64, i64 0) +; CHECK-NEXT: to label %[[NORMAL:.*]] unwind label %[[CLEANUP:.*]] +; CHECK: [[NORMAL]]: +; CHECK-NEXT: ret ptr [[TMP0]] +; CHECK: [[CLEANUP]]: +; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: ret ptr null +; +entry: + %ptr = invoke ptr @malloc(i64 64) to label %normal unwind label %cleanup + +normal: + ret ptr %ptr + +cleanup: + %lp = landingpad { ptr, i32 } cleanup + ret ptr null +} + +define ptr @test_invoke_operator_new() sanitize_alloc_token personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: define ptr @test_invoke_operator_new( +; CHECK-SAME: ) #[[ATTR0]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = invoke ptr @__alloc_token__Znwm(i64 32, i64 1) +; CHECK-NEXT: to label %[[NORMAL:.*]] unwind label %[[CLEANUP:.*]] +; CHECK: [[NORMAL]]: +; CHECK-NEXT: ret ptr [[TMP0]] +; CHECK: [[CLEANUP]]: +; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: ret ptr null +; +entry: + %ptr = invoke ptr @_Znwm(i64 32) to label %normal unwind label %cleanup + +normal: + ret ptr %ptr + +cleanup: + %lp = landingpad { ptr, i32 } cleanup + ret ptr null +} + +; Test complex exception flow with multiple invoke allocations +define ptr @test_complex_invoke_flow() sanitize_alloc_token personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: define ptr @test_complex_invoke_flow( +; CHECK-SAME: ) #[[ATTR0]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = invoke ptr @__alloc_token_malloc(i64 16, i64 2) +; CHECK-NEXT: to label %[[FIRST_OK:.*]] unwind label %[[CLEANUP1:.*]] +; CHECK: [[FIRST_OK]]: +; CHECK-NEXT: 
[[TMP1:%.*]] = invoke ptr @__alloc_token__Znwm(i64 32, i64 3) +; CHECK-NEXT: to label %[[SECOND_OK:.*]] unwind label %[[CLEANUP2:.*]] +; CHECK: [[SECOND_OK]]: +; CHECK-NEXT: ret ptr [[TMP0]] +; CHECK: [[CLEANUP1]]: +; CHECK-NEXT: [[LP1:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: ret ptr null +; CHECK: [[CLEANUP2]]: +; CHECK-NEXT: [[LP2:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: ret ptr null +; +entry: + %ptr1 = invoke ptr @malloc(i64 16) to label %first_ok unwind label %cleanup1 + +first_ok: + %ptr2 = invoke ptr @_Znwm(i64 32) to label %second_ok unwind label %cleanup2 + +second_ok: + ret ptr %ptr1 + +cleanup1: + %lp1 = landingpad { ptr, i32 } cleanup + ret ptr null + +cleanup2: + %lp2 = landingpad { ptr, i32 } cleanup + ret ptr null +} + +; Test mixed call/invoke +define ptr @test_mixed_call_invoke() sanitize_alloc_token personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: define ptr @test_mixed_call_invoke( +; CHECK-SAME: ) #[[ATTR0]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 8, i64 4) +; CHECK-NEXT: [[TMP1:%.*]] = invoke ptr @__alloc_token_malloc(i64 16, i64 5) +; CHECK-NEXT: to label %[[NORMAL:.*]] unwind label %[[CLEANUP:.*]] +; CHECK: [[NORMAL]]: +; CHECK-NEXT: ret ptr [[TMP0]] +; CHECK: [[CLEANUP]]: +; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: ret ptr null +; +entry: + %ptr1 = call ptr @malloc(i64 8) + + %ptr2 = invoke ptr @malloc(i64 16) to label %normal unwind label %cleanup + +normal: + ret ptr %ptr1 + +cleanup: + %lp = landingpad { ptr, i32 } cleanup + ret ptr null +} + +declare ptr @malloc(i64) +declare ptr @_Znwm(i64) +declare i32 @__gxx_personality_v0(...) 
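The invoke.ll test above verifies that the alloc-token pass handles allocation calls made through invoke: the normal and unwind edges stay intact and only the callee is redirected to a token-taking wrapper. A minimal before/after sketch (the function name @example is illustrative; the wrapper name and operands mirror the CHECK lines above, assuming -alloc-token-mode=increment as in the RUN line):

declare ptr @malloc(i64)

define ptr @example() sanitize_alloc_token {
entry:
  ; A plain allocation call inside a sanitize_alloc_token function.
  %ptr = call ptr @malloc(i64 64)
  ret ptr %ptr
}

; After -passes=inferattrs,alloc-token -alloc-token-mode=increment the call is
; redirected to a wrapper taking the size plus an extra token operand:
;   %ptr = call ptr @__alloc_token_malloc(i64 64, i64 0)
; For invokes, only the callee changes; the to/unwind labels are preserved.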
diff --git a/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll b/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll new file mode 100644 index 0000000..e023ab6b --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/nonlibcalls.ll @@ -0,0 +1,85 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -alloc-token-mode=increment -alloc-token-extended -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare ptr @malloc(i64) +declare ptr @custom_malloc(i64) +declare ptr @kmalloc(i64, i64) + +define ptr @test_libcall() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_libcall( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 64, i64 0) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @malloc(i64 64) + ret ptr %ptr1 +} + +define ptr @test_libcall_hint() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_libcall_hint( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 64, i64 1), !alloc_token [[META0:![0-9]+]] +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @malloc(i64 64), !alloc_token !0 + ret ptr %ptr1 +} + +define ptr @test_nonlibcall_nohint() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_nonlibcall_nohint( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR1:%.*]] = call ptr @custom_malloc(i64 8) +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @kmalloc(i64 32, i64 0) +; CHECK-NEXT: ret ptr [[PTR1]] +; +entry: + %ptr1 = call ptr @custom_malloc(i64 8) + %ptr2 = call ptr @kmalloc(i64 32, i64 0) + ret ptr %ptr1 +} + +define ptr @test_nonlibcall_hint() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_nonlibcall_hint( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_custom_malloc(i64 8, i64 2), !alloc_token [[META0]] +; CHECK-NEXT: [[TMP1:%.*]] = call ptr @__alloc_token_kmalloc(i64 32, i64 0, i64 3), !alloc_token [[META0]] +; CHECK-NEXT: [[TMP2:%.*]] = call ptr @__alloc_token_custom_malloc(i64 64, i64 4), !alloc_token [[META0]] +; CHECK-NEXT: [[TMP3:%.*]] = call ptr @__alloc_token_kmalloc(i64 128, i64 2, i64 5), !alloc_token [[META0]] +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @custom_malloc(i64 8), !alloc_token !0 + %ptr2 = call ptr @kmalloc(i64 32, i64 0), !alloc_token !0 + %ptr3 = call ptr @custom_malloc(i64 64), !alloc_token !0 + %ptr4 = call ptr @kmalloc(i64 128, i64 2), !alloc_token !0 + ret ptr %ptr1 +} + +; Functions without sanitize_alloc_token do not get instrumented +define ptr @without_attribute() { +; CHECK-LABEL: define ptr @without_attribute() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR1:%.*]] = call ptr @malloc(i64 64), !alloc_token [[META0]] +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @custom_malloc(i64 8), !alloc_token [[META0]] +; CHECK-NEXT: [[PTR3:%.*]] = call ptr @kmalloc(i64 32, i64 0), !alloc_token [[META0]] +; CHECK-NEXT: ret ptr [[PTR1]] +; +entry: + %ptr1 = call ptr @malloc(i64 64), !alloc_token !0 + %ptr2 = call ptr @custom_malloc(i64 8), !alloc_token !0 + %ptr3 = call ptr @kmalloc(i64 32, i64 0), !alloc_token !0 + ret ptr %ptr1 +} + +!0 = !{!"int"} +;. +; CHECK: [[META0]] = !{!"int"} +;. 
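nonlibcalls.ll above exercises -alloc-token-extended: allocators that are not recognized libcalls (custom_malloc, kmalloc) are rewritten only when the call site carries !alloc_token metadata and only inside functions marked sanitize_alloc_token; unannotated non-libcall calls, and calls in functions without the attribute, are left untouched. A minimal sketch of opting a custom allocator call in (the function name @opt_in is illustrative; the rewritten form mirrors the CHECK lines above):

declare ptr @custom_malloc(i64)

define ptr @opt_in() sanitize_alloc_token {
  ; The metadata marks this call site for instrumentation in extended mode.
  %p = call ptr @custom_malloc(i64 8), !alloc_token !0
  ret ptr %p
}

!0 = !{!"int"}

; With -passes=inferattrs,alloc-token -alloc-token-mode=increment -alloc-token-extended
; the annotated call becomes (token 0, as the first instrumented call in this module):
;   %p = call ptr @__alloc_token_custom_malloc(i64 8, i64 0), !alloc_token !0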
diff --git a/llvm/test/Instrumentation/AllocToken/remark.ll b/llvm/test/Instrumentation/AllocToken/remark.ll new file mode 100644 index 0000000..a2404526 --- /dev/null +++ b/llvm/test/Instrumentation/AllocToken/remark.ll @@ -0,0 +1,38 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=inferattrs,alloc-token -pass-remarks=alloc-token -S 2>&1 | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" + +declare ptr @malloc(i64) + +; CHECK-NOT: remark: <unknown>:0:0: Call to 'malloc' in 'test_has_metadata' without source-level type token +; CHECK: remark: <unknown>:0:0: Call to 'malloc' in 'test_no_metadata' without source-level type token + +define ptr @test_has_metadata() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_has_metadata( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 64, i64 2689373973731826898), !alloc_token [[META0:![0-9]+]] +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @malloc(i64 64), !alloc_token !0 + ret ptr %ptr1 +} + +define ptr @test_no_metadata() sanitize_alloc_token { +; CHECK-LABEL: define ptr @test_no_metadata( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call ptr @__alloc_token_malloc(i64 32, i64 0) +; CHECK-NEXT: ret ptr [[TMP0]] +; +entry: + %ptr1 = call ptr @malloc(i64 32) + ret ptr %ptr1 +} + +!0 = !{!"int"} +;. +; CHECK: [[META0]] = !{!"int"} +;. diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td b/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td index 7ec70b7..98a376b 100644 --- a/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td +++ b/llvm/test/TableGen/RuntimeLibcallEmitter-calling-conv.td @@ -48,39 +48,79 @@ def MSP430LibraryWithCondCC : SystemRuntimeLibrary<isMSP430, // CHECK-NEXT: Entry = DefaultCC; // CHECK-NEXT: } // CHECK-EMPTY: -// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc +// CHECK-NEXT: }; // CHECK-EMPTY: -// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN); -// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } +// CHECK-EMPTY: +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_AlwaysAvailable_AVR_BUILTIN[] = { +// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4 +// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_AlwaysAvailable_AVR_BUILTIN) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: setLibcallImplCallingConv(Impl, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: return; // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::avr) { -// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: 
{RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: -// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN); -// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_AlwaysAvailable_AVR_BUILTIN[] = { +// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4 +// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_AlwaysAvailable_AVR_BUILTIN) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: setLibcallImplCallingConv(Impl, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: return; // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::msp430) { -// CHECK-NEXT: setLibcallImpl(RTLIB::MALLOC, RTLIB::impl_malloc); // malloc +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::MALLOC, RTLIB::impl_malloc}, // malloc +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if ( isFoo() ) { -// CHECK-NEXT: setLibcallImpl(RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4); // __divmodqi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___divmodqi4, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_anonymous_3_AVR_BUILTIN[] = { +// CHECK-NEXT: {RTLIB::SDIVREM_I8, RTLIB::impl___divmodqi4}, // __divmodqi4 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_anonymous_3_AVR_BUILTIN) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: setLibcallImplCallingConv(Impl, CallingConv::AVR_BUILTIN); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if ( isBar() ) { -// CHECK-NEXT: setLibcallImpl(RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4); // __udivmodhi4 -// CHECK-NEXT: setLibcallImplCallingConv(RTLIB::impl___udivmodhi4, CallingConv::MSP430_BUILTIN); +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_anonymous_5_MSP430_BUILTIN[] = { +// CHECK-NEXT: {RTLIB::UDIVREM_I16, RTLIB::impl___udivmodhi4}, // __udivmodhi4 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_anonymous_5_MSP430_BUILTIN) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: setLibcallImplCallingConv(Impl, CallingConv::MSP430_BUILTIN); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: } // CHECK-EMPTY: diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td b/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td index 112c33e..136c81b 100644 --- a/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td +++ b/llvm/test/TableGen/RuntimeLibcallEmitter-conflict-warning.td @@ -25,7 +25,9 @@ def dup1 : RuntimeLibcallImpl<ANOTHER_DUP>; // func_a and func_b both provide SOME_FUNC. 
// CHECK: if (isTargetArchA()) { -// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_b); // func_b +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_b}, // func_b +// CHECK-NEXT: }; // ERR: :[[@LINE+1]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_b, func_a def TheSystemLibraryA : SystemRuntimeLibrary<isTargetArchA, @@ -33,8 +35,10 @@ def TheSystemLibraryA : SystemRuntimeLibrary<isTargetArchA, >; // CHECK: if (isTargetArchB()) { -// CHECK-NEXT: setLibcallImpl(RTLIB::OTHER_FUNC, RTLIB::impl_other_func); // other_func -// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_a); // func_a +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::OTHER_FUNC, RTLIB::impl_other_func}, // other_func +// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_a}, // func_a +// CHECK-NEXT: }; // ERR: :[[@LINE+1]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_a, func_b def TheSystemLibraryB : SystemRuntimeLibrary<isTargetArchB, @@ -42,9 +46,11 @@ def TheSystemLibraryB : SystemRuntimeLibrary<isTargetArchB, >; // CHECK: if (isTargetArchC()) { -// CHECK-NEXT: setLibcallImpl(RTLIB::ANOTHER_DUP, RTLIB::impl_dup1); // dup1 -// CHECK-NEXT: setLibcallImpl(RTLIB::OTHER_FUNC, RTLIB::impl_other_func); // other_func -// CHECK-NEXT: setLibcallImpl(RTLIB::SOME_FUNC, RTLIB::impl_func_a); // func_a +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::ANOTHER_DUP, RTLIB::impl_dup1}, // dup1 +// CHECK-NEXT: {RTLIB::OTHER_FUNC, RTLIB::impl_other_func}, // other_func +// CHECK-NEXT: {RTLIB::SOME_FUNC, RTLIB::impl_func_a}, // func_a +// CHECK-NEXT: }; // ERR: :[[@LINE+3]]:5: warning: conflicting implementations for libcall ANOTHER_DUP: dup1, dup0 // ERR: :[[@LINE+2]]:5: warning: conflicting implementations for libcall SOME_FUNC: func_a, func_b diff --git a/llvm/test/TableGen/RuntimeLibcallEmitter.td b/llvm/test/TableGen/RuntimeLibcallEmitter.td index f4577f8..c336fee 100644 --- a/llvm/test/TableGen/RuntimeLibcallEmitter.td +++ b/llvm/test/TableGen/RuntimeLibcallEmitter.td @@ -190,20 +190,42 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi // CHECK-NEXT: } // CHECK: void llvm::RTLIB::RuntimeLibcallsInfo::setTargetRuntimeLibcallSets(const llvm::Triple &TT, ExceptionHandling ExceptionModel, FloatABI::ABIType FloatABI, EABI EABIVersion, StringRef ABIName) { +// CHECK-NEXT: struct LibcallImplPair { +// CHECK-NEXT: RTLIB::Libcall Func; +// CHECK-NEXT: RTLIB::LibcallImpl Impl; +// CHECK-NEXT: }; // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::blah) { -// CHECK-NEXT: setLibcallImpl(RTLIB::BZERO, RTLIB::impl_bzero); // bzero -// CHECK-NEXT: setLibcallImpl(RTLIB::CALLOC, RTLIB::impl_calloc); // calloc -// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128); // sqrtl +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::BZERO, RTLIB::impl_bzero}, // bzero +// CHECK-NEXT: {RTLIB::CALLOC, RTLIB::impl_calloc}, // calloc +// CHECK-NEXT: {RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128}, // sqrtl +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.hasCompilerRT()) { -// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3 -// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, 
RTLIB::impl___lshrdi3); // __lshrdi3 +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_hasCompilerRT[] = { +// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3 +// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_hasCompilerRT) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getOS() == Triple::bar) { -// CHECK-NEXT: setLibcallImpl(RTLIB::MEMSET, RTLIB::impl____memset); // ___memset +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_isBarOS[] = { +// CHECK-NEXT: {RTLIB::MEMSET, RTLIB::impl____memset}, // ___memset +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_isBarOS) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: } // CHECK-EMPTY: @@ -211,19 +233,37 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::buzz) { -// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3 -// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80); // sqrtl -// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, RTLIB::impl___lshrdi3); // __lshrdi3 +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3 +// CHECK-NEXT: {RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80}, // sqrtl +// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: return; // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::foo) { -// CHECK-NEXT: setLibcallImpl(RTLIB::BZERO, RTLIB::impl_bzero); // bzero -// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128); // sqrtl +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { +// CHECK-NEXT: {RTLIB::BZERO, RTLIB::impl_bzero}, // bzero +// CHECK-NEXT: {RTLIB::SQRT_F128, RTLIB::impl_sqrtl_f128}, // sqrtl +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getOS() == Triple::bar) { -// CHECK-NEXT: setLibcallImpl(RTLIB::MEMSET, RTLIB::impl____memset); // ___memset +// CHECK-NEXT: static const LibcallImplPair LibraryCalls_isBarOS[] = { +// CHECK-NEXT: {RTLIB::MEMSET, RTLIB::impl____memset}, // ___memset +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls_isBarOS) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: } // CHECK-EMPTY: @@ -231,10 +271,16 @@ def BlahLibrary : SystemRuntimeLibrary<isBlahArch, (add calloc, LibraryWithCondi // CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: if (TT.getArch() == Triple::simple) { -// CHECK-NEXT: setLibcallImpl(RTLIB::CALLOC, RTLIB::impl_calloc); // calloc -// CHECK-NEXT: setLibcallImpl(RTLIB::SHL_I32, RTLIB::impl___ashlsi3); // __ashlsi3 -// CHECK-NEXT: setLibcallImpl(RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80); // sqrtl -// CHECK-NEXT: setLibcallImpl(RTLIB::SRL_I64, RTLIB::impl___lshrdi3); // __lshrdi3 +// CHECK-NEXT: static const LibcallImplPair LibraryCalls[] = { 
+// CHECK-NEXT: {RTLIB::CALLOC, RTLIB::impl_calloc}, // calloc +// CHECK-NEXT: {RTLIB::SHL_I32, RTLIB::impl___ashlsi3}, // __ashlsi3 +// CHECK-NEXT: {RTLIB::SQRT_F80, RTLIB::impl_sqrtl_f80}, // sqrtl +// CHECK-NEXT: {RTLIB::SRL_I64, RTLIB::impl___lshrdi3}, // __lshrdi3 +// CHECK-NEXT: }; +// CHECK-EMPTY: +// CHECK-NEXT: for (const auto [Func, Impl] : LibraryCalls) { +// CHECK-NEXT: setLibcallImpl(Func, Impl); +// CHECK-NEXT: } // CHECK-EMPTY: // CHECK-NEXT: return; // CHECK-NEXT: } diff --git a/llvm/test/Transforms/GVN/2011-07-07-MatchIntrinsicExtract.ll b/llvm/test/Transforms/GVN/2011-07-07-MatchIntrinsicExtract.ll index b139e07..acd0317 100644 --- a/llvm/test/Transforms/GVN/2011-07-07-MatchIntrinsicExtract.ll +++ b/llvm/test/Transforms/GVN/2011-07-07-MatchIntrinsicExtract.ll @@ -1,9 +1,19 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s -; %0 = type { i64, i1 } define i64 @test1(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test1( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) %uadd.0 = extractvalue %0 %uadd, 0 @@ -11,11 +21,17 @@ entry: ret i64 %add1 } -; CHECK-LABEL: @test1( -; CHECK-NOT: add1 -; CHECK: ret - define i64 @test2(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test2( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b) %usub.0 = extractvalue %0 %usub, 0 @@ -23,11 +39,17 @@ entry: ret i64 %sub1 } -; CHECK-LABEL: @test2( -; CHECK-NOT: sub1 -; CHECK: ret - define i64 @test3(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test3( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b) %umul.0 = extractvalue %0 %umul, 0 @@ -35,11 +57,17 @@ entry: ret i64 %mul1 } -; CHECK-LABEL: @test3( -; CHECK-NOT: mul1 -; CHECK: ret - define i64 @test4(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test4( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; 
CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b) %sadd.0 = extractvalue %0 %sadd, 0 @@ -47,11 +75,17 @@ entry: ret i64 %add1 } -; CHECK-LABEL: @test4( -; CHECK-NOT: add1 -; CHECK: ret - define i64 @test5(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test5( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) %ssub.0 = extractvalue %0 %ssub, 0 @@ -59,11 +93,17 @@ entry: ret i64 %sub1 } -; CHECK-LABEL: @test5( -; CHECK-NOT: sub1 -; CHECK: ret - define i64 @test6(i64 %a, i64 %b) nounwind ssp { +; CHECK-LABEL: define i64 @test6( +; CHECK-SAME: i64 [[A:%.*]], i64 [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A]], i64 [[B]]) +; CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = insertvalue [[TMP0]] poison, i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = insertvalue [[TMP0]] [[TMP2]], i1 [[TMP3]], 1 +; CHECK-NEXT: ret i64 [[TMP1]] +; entry: %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b) %smul.0 = extractvalue %0 %smul, 0 @@ -71,10 +111,6 @@ entry: ret i64 %mul1 } -; CHECK-LABEL: @test6( -; CHECK-NOT: mul1 -; CHECK: ret - declare void @exit(i32) noreturn declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone declare %0 @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone @@ -82,4 +118,3 @@ declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone declare %0 @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone declare %0 @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone declare %0 @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone - diff --git a/llvm/test/Transforms/GVN/2011-09-07-TypeIdFor.ll b/llvm/test/Transforms/GVN/2011-09-07-TypeIdFor.ll index 01cc3164..52e6a8e 100644 --- a/llvm/test/Transforms/GVN/2011-09-07-TypeIdFor.ll +++ b/llvm/test/Transforms/GVN/2011-09-07-TypeIdFor.ll @@ -1,4 +1,6 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s + %struct.__fundamental_type_info_pseudo = type { %struct.__type_info_pseudo } %struct.__type_info_pseudo = type { ptr, ptr } @@ -18,26 +20,70 @@ declare void @__cxa_end_catch() declare i32 @__gxx_personality_v0(i32, i64, ptr, ptr) define void @_Z3foov() uwtable personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: define void @_Z3foov( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; 
CHECK-NEXT: invoke void @_Z4barv() +; CHECK-NEXT: to label %[[RETURN:.*]] unwind label %[[LPAD:.*]] +; CHECK: [[LPAD]]: +; CHECK-NEXT: [[TMP0:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: catch ptr @_ZTIi +; CHECK-NEXT: catch ptr @_ZTIb +; CHECK-NEXT: catch ptr @_ZTIi +; CHECK-NEXT: catch ptr @_ZTIb +; CHECK-NEXT: [[EXC_PTR2_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 0 +; CHECK-NEXT: [[FILTER3_I:%.*]] = extractvalue { ptr, i32 } [[TMP0]], 1 +; CHECK-NEXT: [[TYPEID_I:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIi) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID_I]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[PPAD:.*]], label %[[NEXT:.*]] +; CHECK: [[NEXT]]: +; CHECK-NEXT: [[TYPEID1_I:%.*]] = tail call i32 @llvm.eh.typeid.for.p0(ptr @_ZTIb) +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[FILTER3_I]], [[TYPEID1_I]] +; CHECK-NEXT: br i1 [[TMP2]], label %[[PPAD2:.*]], label %[[NEXT2:.*]] +; CHECK: [[PPAD]]: +; CHECK-NEXT: [[TMP3:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR0:[0-9]+]] +; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR0]] +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[PPAD2]]: +; CHECK-NEXT: [[D_2073_5_I:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR0]] +; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR0]] +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[NEXT2]]: +; CHECK-NEXT: call void @_Z7cleanupv() +; CHECK-NEXT: br i1 false, label %[[PPAD3:.*]], label %[[NEXT3:.*]] +; CHECK: [[NEXT3]]: +; CHECK-NEXT: br i1 false, label %[[PPAD4:.*]], label %[[UNWIND:.*]] +; CHECK: [[UNWIND]]: +; CHECK-NEXT: resume { ptr, i32 } [[TMP0]] +; CHECK: [[PPAD3]]: +; CHECK-NEXT: [[TMP4:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR0]] +; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR0]] +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[PPAD4]]: +; CHECK-NEXT: [[D_2080_5:%.*]] = tail call ptr @__cxa_begin_catch(ptr [[EXC_PTR2_I]]) #[[ATTR0]] +; CHECK-NEXT: tail call void @__cxa_end_catch() #[[ATTR0]] +; CHECK-NEXT: br label %[[RETURN]] +; CHECK: [[RETURN]]: +; CHECK-NEXT: ret void +; entry: invoke void @_Z4barv() - to label %return unwind label %lpad + to label %return unwind label %lpad lpad: ; preds = %entry %0 = landingpad { ptr, i32 } - catch ptr @_ZTIi - catch ptr @_ZTIb - catch ptr @_ZTIi - catch ptr @_ZTIb + catch ptr @_ZTIi + catch ptr @_ZTIb + catch ptr @_ZTIi + catch ptr @_ZTIb %exc_ptr2.i = extractvalue { ptr, i32 } %0, 0 %filter3.i = extractvalue { ptr, i32 } %0, 1 %typeid.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) -; CHECK: call i32 @llvm.eh.typeid.for %1 = icmp eq i32 %filter3.i, %typeid.i br i1 %1, label %ppad, label %next next: ; preds = %lpad %typeid1.i = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIb) -; CHECK: call i32 @llvm.eh.typeid.for %2 = icmp eq i32 %filter3.i, %typeid1.i br i1 %2, label %ppad2, label %next2 @@ -54,7 +100,6 @@ ppad2: ; preds = %next next2: ; preds = %next call void @_Z7cleanupv() %typeid = tail call i32 @llvm.eh.typeid.for(ptr @_ZTIi) -; CHECK-NOT: call i32 @llvm.eh.typeid.for %4 = icmp eq i32 %filter3.i, %typeid br i1 %4, label %ppad3, label %next3 diff --git a/llvm/test/Transforms/GVN/2012-05-22-PreCrash.ll b/llvm/test/Transforms/GVN/2012-05-22-PreCrash.ll index 28b7178..205dff7 100644 --- a/llvm/test/Transforms/GVN/2012-05-22-PreCrash.ll +++ b/llvm/test/Transforms/GVN/2012-05-22-PreCrash.ll @@ -1,7 +1,35 @@ -; RUN: opt < %s -passes=gvn +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < 
%s -passes=gvn -S | FileCheck %s + ; PR12858 define void @fn5(i16 signext %p1, i8 signext %p2, i1 %arg) nounwind uwtable { +; CHECK-LABEL: define void @fn5( +; CHECK-SAME: i16 signext [[P1:%.*]], i8 signext [[P2:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[ARG]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[DOTPRE:%.*]] = sext i16 [[P1]] to i32 +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_ELSE]]: +; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[P1]] to i32 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[CONV1_PRE_PHI:%.*]] = phi i32 [ [[CONV]], %[[IF_ELSE]] ], [ [[DOTPRE]], %[[IF_THEN]] ] +; CHECK-NEXT: br i1 [[ARG]], label %[[IF_THEN3:.*]], label %[[IF_ELSE4:.*]] +; CHECK: [[IF_THEN3]]: +; CHECK-NEXT: [[DOTPRE1:%.*]] = sext i8 [[P2]] to i32 +; CHECK-NEXT: br label %[[IF_END12:.*]] +; CHECK: [[IF_ELSE4]]: +; CHECK-NEXT: [[CONV7:%.*]] = sext i8 [[P2]] to i32 +; CHECK-NEXT: [[CMP8:%.*]] = icmp eq i32 [[CONV1_PRE_PHI]], [[CONV7]] +; CHECK-NEXT: br i1 [[CMP8]], label %[[IF_THEN10:.*]], label %[[IF_END12]] +; CHECK: [[IF_THEN10]]: +; CHECK-NEXT: br label %[[IF_END12]] +; CHECK: [[IF_END12]]: +; CHECK-NEXT: [[CONV13_PRE_PHI:%.*]] = phi i32 [ [[CONV7]], %[[IF_THEN10]] ], [ [[CONV7]], %[[IF_ELSE4]] ], [ [[DOTPRE1]], %[[IF_THEN3]] ] +; CHECK-NEXT: ret void +; entry: br i1 %arg, label %if.else, label %if.then diff --git a/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather-inseltpoison.ll b/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather-inseltpoison.ll index c2b123b..aeb3de9 100644 --- a/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather-inseltpoison.ll +++ b/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather-inseltpoison.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> , <2 x ptr> , i32 , <2 x i1> ) @@ -5,14 +6,29 @@ declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x ; This test ensures that masked scatter and gather operations, which take vectors of pointers, ; do not have pointer aliasing ignored when being processed. -; No scatter/gather calls should end up eliminated -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.scatter -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.scatter -; CHECK: llvm.masked.gather +; No scatter/gather calls should end up eliminated. 
+ define spir_kernel void @test(<2 x ptr> %in1, <2 x ptr> %in2, ptr %out) { +; CHECK-LABEL: define spir_kernel void @test( +; CHECK-SAME: <2 x ptr> [[IN1:%.*]], <2 x ptr> [[IN2:%.*]], ptr [[OUT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP_0:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP_1:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP_I:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP_0]], i32 0 +; CHECK-NEXT: [[TMP:%.*]] = insertelement <2 x ptr> [[TMP_I]], ptr [[TMP_1]], i32 1 +; CHECK-NEXT: [[IN1_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN1]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: [[IN2_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN2]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN1_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true)) +; CHECK-NEXT: [[TMP_V_0:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN2_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true)) +; CHECK-NEXT: [[TMP_V_1:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: [[TMP_V_1_0:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 0 +; CHECK-NEXT: [[TMP_V_1_1:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 1 +; CHECK-NEXT: store i32 [[TMP_V_1_0]], ptr [[OUT]], align 4 +; CHECK-NEXT: [[OUT_1:%.*]] = getelementptr i32, ptr [[OUT]], i32 1 +; CHECK-NEXT: store i32 [[TMP_V_1_1]], ptr [[OUT_1]], align 4 +; CHECK-NEXT: ret void +; entry: ; Just some temporary storage %tmp.0 = alloca i32 diff --git a/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather.ll b/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather.ll index e18f388..4c00060 100644 --- a/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather.ll +++ b/llvm/test/Transforms/GVN/2016-08-30-MaskedScatterGather.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> , <2 x ptr> , i32 , <2 x i1> ) @@ -5,14 +6,29 @@ declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x ; This test ensures that masked scatter and gather operations, which take vectors of pointers, ; do not have pointer aliasing ignored when being processed. -; No scatter/gather calls should end up eliminated -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.scatter -; CHECK: llvm.masked.gather -; CHECK: llvm.masked.scatter -; CHECK: llvm.masked.gather +; No scatter/gather calls should end up eliminated. 
+ define spir_kernel void @test(<2 x ptr> %in1, <2 x ptr> %in2, ptr %out) { +; CHECK-LABEL: define spir_kernel void @test( +; CHECK-SAME: <2 x ptr> [[IN1:%.*]], <2 x ptr> [[IN2:%.*]], ptr [[OUT:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP_0:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP_1:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP_I:%.*]] = insertelement <2 x ptr> undef, ptr [[TMP_0]], i32 0 +; CHECK-NEXT: [[TMP:%.*]] = insertelement <2 x ptr> [[TMP_I]], ptr [[TMP_1]], i32 1 +; CHECK-NEXT: [[IN1_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN1]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: [[IN2_V:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[IN2]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN1_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true)) +; CHECK-NEXT: [[TMP_V_0:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> [[IN2_V]], <2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true)) +; CHECK-NEXT: [[TMP_V_1:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP]], i32 1, <2 x i1> splat (i1 true), <2 x i32> undef) +; CHECK-NEXT: [[TMP_V_1_0:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 0 +; CHECK-NEXT: [[TMP_V_1_1:%.*]] = extractelement <2 x i32> [[TMP_V_1]], i32 1 +; CHECK-NEXT: store i32 [[TMP_V_1_0]], ptr [[OUT]], align 4 +; CHECK-NEXT: [[OUT_1:%.*]] = getelementptr i32, ptr [[OUT]], i32 1 +; CHECK-NEXT: store i32 [[TMP_V_1_1]], ptr [[OUT_1]], align 4 +; CHECK-NEXT: ret void +; entry: ; Just some temporary storage %tmp.0 = alloca i32 diff --git a/llvm/test/Transforms/GVN/MemdepMiscompile.ll b/llvm/test/Transforms/GVN/MemdepMiscompile.ll index cb9b011..7c8accb 100644 --- a/llvm/test/Transforms/GVN/MemdepMiscompile.ll +++ b/llvm/test/Transforms/GVN/MemdepMiscompile.ll @@ -1,4 +1,6 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s + target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-macosx10.7.0" @@ -7,14 +9,38 @@ target triple = "x86_64-apple-macosx10.7.0" ; Make sure we do not replace load %shouldExit in while.cond.backedge ; with a phi node where the value from while.body is 0. 
define i32 @test() nounwind ssp { +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SHOULDEXIT:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TASKSIDLE:%.*]] = alloca i32, align 4 +; CHECK-NEXT: store i32 0, ptr [[SHOULDEXIT]], align 4 +; CHECK-NEXT: store i32 0, ptr [[TASKSIDLE]], align 4 +; CHECK-NEXT: call void @CTestInitialize(ptr [[TASKSIDLE]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: br i1 true, label %[[WHILE_BODY_LR_PH:.*]], label %[[ENTRY_WHILE_END_CRIT_EDGE:.*]] +; CHECK: [[ENTRY_WHILE_END_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[WHILE_END:.*]] +; CHECK: [[WHILE_BODY_LR_PH]]: +; CHECK-NEXT: br label %[[WHILE_BODY:.*]] +; CHECK: [[WHILE_BODY]]: +; CHECK-NEXT: call void @RunInMode(i32 100) #[[ATTR1]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[TASKSIDLE]], align 4 +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0 +; CHECK-NEXT: br i1 [[TOBOOL]], label %[[WHILE_COND_BACKEDGE:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i32 0, ptr [[TASKSIDLE]], align 4 +; CHECK-NEXT: call void @TimerCreate(ptr [[SHOULDEXIT]]) #[[ATTR1]] +; CHECK-NEXT: br label %[[WHILE_COND_BACKEDGE]] +; CHECK: [[WHILE_COND_BACKEDGE]]: +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SHOULDEXIT]], align 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_BODY]], label %[[WHILE_COND_WHILE_END_CRIT_EDGE:.*]] +; CHECK: [[WHILE_COND_WHILE_END_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[WHILE_END]] +; CHECK: [[WHILE_END]]: +; CHECK-NEXT: ret i32 0 +; entry: -; CHECK: test() -; CHECK: while.body: -; CHECK: call void @RunInMode -; CHECK: br i1 %tobool, label %while.cond.backedge, label %if.then -; CHECK: while.cond.backedge: -; CHECK: load i32, ptr %shouldExit -; CHECK: br i1 %cmp, label %while.body %shouldExit = alloca i32, align 4 %tasksIdle = alloca i32, align 4 store i32 0, ptr %shouldExit, align 4 diff --git a/llvm/test/Transforms/GVN/basic-undef-test.ll b/llvm/test/Transforms/GVN/basic-undef-test.ll index d12c3db..459ef25 100644 --- a/llvm/test/Transforms/GVN/basic-undef-test.ll +++ b/llvm/test/Transforms/GVN/basic-undef-test.ll @@ -1,15 +1,22 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s -; ModuleID = 'test3.ll' + target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +; RLE over the second load. 
define i32 @main(ptr %foo) { +; CHECK-LABEL: define i32 @main( +; CHECK-SAME: ptr [[FOO:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[FOO]], align 4 +; CHECK-NEXT: store i32 5, ptr undef, align 4 +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], [[TMP0]] +; CHECK-NEXT: ret i32 [[TMP1]] +; entry: -; CHECK: load i32, ptr %foo, align 4 %0 = load i32, ptr %foo, align 4 store i32 5, ptr undef, align 4 -; CHECK-NOT: load i32, ptr %foo, align 4 %1 = load i32, ptr %foo, align 4 -; CHECK: add i32 %0, %0 %2 = add i32 %0, %1 ret i32 %2 } diff --git a/llvm/test/Transforms/GVN/bitcast-of-call.ll b/llvm/test/Transforms/GVN/bitcast-of-call.ll index 6c4e8d2..3f40085 100644 --- a/llvm/test/Transforms/GVN/bitcast-of-call.ll +++ b/llvm/test/Transforms/GVN/bitcast-of-call.ll @@ -1,13 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s + ; PR2213 define ptr @f(ptr %x) { +; CHECK-LABEL: define ptr @f( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP:%.*]] = call ptr @m(i32 12) +; CHECK-NEXT: ret ptr [[TMP]] +; entry: - %tmp = call ptr @m( i32 12 ) ; <ptr> [#uses=2] - %tmp1 = bitcast ptr %tmp to ptr ; <ptr> [#uses=0] - %tmp2 = bitcast ptr %tmp to ptr ; <ptr> [#uses=0] -; CHECK-NOT: %tmp2 - ret ptr %tmp2 + %tmp = call ptr @m(i32 12) ; <ptr> [#uses=2] + %tmp1 = bitcast ptr %tmp to ptr ; <ptr> [#uses=0] + %tmp2 = bitcast ptr %tmp to ptr ; <ptr> [#uses=0] + ret ptr %tmp2 } declare ptr @m(i32) diff --git a/llvm/test/Transforms/GVN/br-identical.ll b/llvm/test/Transforms/GVN/br-identical.ll index 9997e01..5266889 100644 --- a/llvm/test/Transforms/GVN/br-identical.ll +++ b/llvm/test/Transforms/GVN/br-identical.ll @@ -1,8 +1,32 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S -o - %s | FileCheck %s ; If a branch has two identical successors, we cannot declare either dead. - define void @widget(i1 %p) { +; CHECK-LABEL: define void @widget( +; CHECK-SAME: i1 [[P:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[T1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[T2:%.*]], %[[BB7:.*]] ] +; CHECK-NEXT: [[T2]] = add i64 [[T1]], 1 +; CHECK-NEXT: [[T3:%.*]] = icmp ult i64 0, [[T2]] +; CHECK-NEXT: br i1 [[T3]], label %[[BB3:.*]], label %[[BB4:.*]] +; CHECK: [[BB3]]: +; CHECK-NEXT: [[T4:%.*]] = call i64 @f() +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[FOO:%.*]] = phi i64 [ [[T4]], %[[BB3]] ], [ 0, %[[BB2]] ] +; CHECK-NEXT: br i1 [[P]], label %[[BB5:.*]], label %[[BB6:.*]] +; CHECK: [[BB5]]: +; CHECK-NEXT: br i1 true, label %[[BB7]], label %[[BB7]] +; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 true, label %[[BB7]], label %[[BB7]] +; CHECK: [[BB7]]: +; CHECK-NEXT: br i1 [[P]], label %[[BB2]], label %[[BB8:.*]] +; CHECK: [[BB8]]: +; CHECK-NEXT: ret void +; entry: br label %bb2 @@ -17,7 +41,6 @@ bb3: br label %bb4 bb4: - ; CHECK-NOT: phi {{.*}} undef %foo = phi i64 [ %t4, %bb3 ], [ 0, %bb2 ] br i1 %p, label %bb5, label %bb6 diff --git a/llvm/test/Transforms/GVN/calls-nonlocal.ll b/llvm/test/Transforms/GVN/calls-nonlocal.ll index e891545..4340d57 100644 --- a/llvm/test/Transforms/GVN/calls-nonlocal.ll +++ b/llvm/test/Transforms/GVN/calls-nonlocal.ll @@ -1,75 +1,78 @@ -; Two occurrences of strlen should be zapped. 
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s + +; Two occurrences of strlen should be zapped. target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin9" define i32 @test(i32 %g, ptr %P) nounwind { +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: i32 [[G:%.*]], ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @strlen(ptr [[P]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 100 +; CHECK-NEXT: [[TMP34:%.*]] = zext i1 [[TMP3]] to i8 +; CHECK-NEXT: br i1 [[TMP3]], label %[[BB:.*]], label %[[BB6:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: br label %[[BB27:.*]] +; CHECK: [[BB6]]: +; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[G]], 42 +; CHECK-NEXT: br i1 false, label %[[BB14:.*]], label %[[BB16:.*]] +; CHECK: [[BB14]]: +; CHECK-NEXT: br label %[[BB27]] +; CHECK: [[BB16]]: +; CHECK-NEXT: [[TMP18:%.*]] = mul i32 [[TMP8]], 2 +; CHECK-NEXT: br i1 false, label %[[BB24:.*]], label %[[BB26:.*]] +; CHECK: [[BB24]]: +; CHECK-NEXT: br label %[[BB27]] +; CHECK: [[BB26]]: +; CHECK-NEXT: br label %[[BB27]] +; CHECK: [[BB27]]: +; CHECK-NEXT: [[TMP_0:%.*]] = phi i32 [ 11, %[[BB26]] ], [ poison, %[[BB24]] ], [ poison, %[[BB14]] ], [ [[G]], %[[BB]] ] +; CHECK-NEXT: ret i32 [[TMP_0]] +; entry: - %tmp2 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] - %tmp3 = icmp eq i32 %tmp2, 100 ; <i1> [#uses=1] - %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1] - %toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1] - br i1 %toBool, label %bb, label %bb6 + %tmp2 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] + %tmp3 = icmp eq i32 %tmp2, 100 ; <i1> [#uses=1] + %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1] + %toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1] + br i1 %toBool, label %bb, label %bb6 bb: ; preds = %entry - br label %bb27 + br label %bb27 bb6: ; preds = %entry - %tmp8 = add i32 %g, 42 ; <i32> [#uses=2] - %tmp10 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] - %tmp11 = icmp eq i32 %tmp10, 100 ; <i1> [#uses=1] - %tmp1112 = zext i1 %tmp11 to i8 ; <i8> [#uses=1] - %toBool13 = icmp ne i8 %tmp1112, 0 ; <i1> [#uses=1] - br i1 %toBool13, label %bb14, label %bb16 + %tmp8 = add i32 %g, 42 ; <i32> [#uses=2] + %tmp10 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] + %tmp11 = icmp eq i32 %tmp10, 100 ; <i1> [#uses=1] + %tmp1112 = zext i1 %tmp11 to i8 ; <i8> [#uses=1] + %toBool13 = icmp ne i8 %tmp1112, 0 ; <i1> [#uses=1] + br i1 %toBool13, label %bb14, label %bb16 bb14: ; preds = %bb6 - br label %bb27 + br label %bb27 bb16: ; preds = %bb6 - %tmp18 = mul i32 %tmp8, 2 ; <i32> [#uses=1] - %tmp20 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] - %tmp21 = icmp eq i32 %tmp20, 100 ; <i1> [#uses=1] - %tmp2122 = zext i1 %tmp21 to i8 ; <i8> [#uses=1] - %toBool23 = icmp ne i8 %tmp2122, 0 ; <i1> [#uses=1] - br i1 %toBool23, label %bb24, label %bb26 + %tmp18 = mul i32 %tmp8, 2 ; <i32> [#uses=1] + %tmp20 = call i32 @strlen( ptr %P ) nounwind readonly ; <i32> [#uses=1] + %tmp21 = icmp eq i32 %tmp20, 100 ; <i1> [#uses=1] + %tmp2122 = zext i1 %tmp21 to i8 ; <i8> [#uses=1] + %toBool23 = icmp ne i8 %tmp2122, 0 ; <i1> [#uses=1] + br i1 %toBool23, label %bb24, label %bb26 bb24: ; preds = %bb16 - br label %bb27 + br label %bb27 bb26: ; preds = %bb16 - br label %bb27 + br label %bb27 bb27: ; preds = 
%bb26, %bb24, %bb14, %bb - %tmp.0 = phi i32 [ 11, %bb26 ], [ %tmp18, %bb24 ], [ %tmp8, %bb14 ], [ %g, %bb ] ; <i32> [#uses=1] - br label %return + %tmp.0 = phi i32 [ 11, %bb26 ], [ %tmp18, %bb24 ], [ %tmp8, %bb14 ], [ %g, %bb ] ; <i32> [#uses=1] + br label %return return: ; preds = %bb27 - ret i32 %tmp.0 + ret i32 %tmp.0 } -; CHECK: define i32 @test(i32 %g, ptr %P) #0 { -; CHECK: entry: -; CHECK: %tmp2 = call i32 @strlen(ptr %P) #1 -; CHECK: %tmp3 = icmp eq i32 %tmp2, 100 -; CHECK: %tmp34 = zext i1 %tmp3 to i8 -; CHECK: br i1 %tmp3, label %bb, label %bb6 -; CHECK: bb: -; CHECK: br label %bb27 -; CHECK: bb6: -; CHECK: %tmp8 = add i32 %g, 42 -; CHECK: br i1 false, label %bb14, label %bb16 -; CHECK: bb14: -; CHECK: br label %bb27 -; CHECK: bb16: -; CHECK: %tmp18 = mul i32 %tmp8, 2 -; CHECK: br i1 false, label %bb24, label %bb26 -; CHECK: bb24: -; CHECK: br label %bb27 -; CHECK: bb26: -; CHECK: br label %bb27 -; CHECK: bb27: -; CHECK: %tmp.0 = phi i32 [ 11, %bb26 ], [ poison, %bb24 ], [ poison, %bb14 ], [ %g, %bb ] -; CHECK: ret i32 %tmp.0 -; CHECK: } -declare i32 @strlen(ptr) nounwind readonly +declare i32 @strlen(ptr) nounwind readonly diff --git a/llvm/test/Transforms/GVN/calls-readonly.ll b/llvm/test/Transforms/GVN/calls-readonly.ll index b4855e4..2fb5621 100644 --- a/llvm/test/Transforms/GVN/calls-readonly.ll +++ b/llvm/test/Transforms/GVN/calls-readonly.ll @@ -1,10 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s + ; Should delete the second call to strlen even though the intervening strchr call exists. target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin7" define ptr @test(ptr %P, ptr %Q, i32 %x, i32 %y) nounwind readonly { +; CHECK-LABEL: define ptr @test( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @strlen(ptr [[P]]), !prof [[PROF0:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0 +; CHECK-NEXT: br i1 [[TMP1]], label %[[BB:.*]], label %[[BB1:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: [[TMP2:%.*]] = sdiv i32 [[X]], [[Y]] +; CHECK-NEXT: br label %[[BB1]] +; CHECK: [[BB1]]: +; CHECK-NEXT: [[X_ADDR_0:%.*]] = phi i32 [ [[TMP2]], %[[BB]] ], [ [[X]], %[[ENTRY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = tail call ptr @strchr(ptr [[Q]], i32 97) +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[X_ADDR_0]], [[TMP0]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP3]], i32 [[X_ADDR_0]] +; CHECK-NEXT: ret ptr [[TMP5]] +; entry: %0 = tail call i32 @strlen(ptr %P), !prof !0 ; <i32> [#uses=2] %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1] @@ -24,21 +42,6 @@ bb1: ; preds = %bb, %entry ret ptr %6 } -; CHECK: define ptr @test(ptr %P, ptr %Q, i32 %x, i32 %y) #0 { -; CHECK: entry: -; CHECK-NEXT: %0 = tail call i32 @strlen(ptr %P), !prof !0 -; CHECK-NEXT: %1 = icmp eq i32 %0, 0 -; CHECK-NEXT: br i1 %1, label %bb, label %bb1 -; CHECK: bb: -; CHECK-NEXT: %2 = sdiv i32 %x, %y -; CHECK-NEXT: br label %bb1 -; CHECK: bb1: -; CHECK-NEXT: %x_addr.0 = phi i32 [ %2, %bb ], [ %x, %entry ] -; CHECK-NEXT: %3 = tail call ptr @strchr(ptr %Q, i32 97) -; CHECK-NEXT: %4 = add i32 %x_addr.0, %0 -; CHECK-NEXT: %5 = getelementptr i8, ptr %3, i32 %x_addr.0 -; CHECK-NEXT: ret ptr %5 -; CHECK: } declare i32 @strlen(ptr) nounwind readonly @@ -46,3 +49,6 @@ declare ptr @strchr(ptr, i32) nounwind 
readonly !0 = !{!"branch_weights", i32 95} !1 = !{!"branch_weights", i32 95} +;. +; CHECK: [[PROF0]] = !{!"branch_weights", i64 190} +;. diff --git a/llvm/test/Transforms/GVN/cond_br.ll b/llvm/test/Transforms/GVN/cond_br.ll index fb84b62..10ee3a0 100644 --- a/llvm/test/Transforms/GVN/cond_br.ll +++ b/llvm/test/Transforms/GVN/cond_br.ll @@ -1,12 +1,25 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s + @y = external global i32 @z = external global i32 ; Function Attrs: nounwind ssp uwtable define void @foo(i32 %x) { -; CHECK: @foo(i32 %x) -; CHECK: %.pre = load i32, ptr @y -; CHECK: call void @bar(i32 %.pre) +; CHECK-LABEL: define void @foo( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @y, align 4 +; CHECK-NEXT: br i1 false, label %[[IF_THEN:.*]], label %[[ENTRY_IF_END_CRIT_EDGE:.*]] +; CHECK: [[ENTRY_IF_END_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X]], 3 +; CHECK-NEXT: store i32 [[ADD]], ptr @y, align 4 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: tail call void @bar(i32 [[DOTPRE]]) +; CHECK-NEXT: ret void +; %t = sub i32 %x, %x %.pre = load i32, ptr @y, align 4 @@ -28,9 +41,22 @@ if.end: ; preds = %entry.if.end_crit_e } define void @foo2(i32 %x) { -; CHECK: @foo2(i32 %x) -; CHECK: %.pre = load i32, ptr @y -; CHECK: tail call void @bar(i32 %.pre) +; CHECK-LABEL: define void @foo2( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr @y, align 4 +; CHECK-NEXT: br i1 false, label %[[IF_THEN:.*]], label %[[IF_ELSE:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[X]], 3 +; CHECK-NEXT: store i32 [[ADD]], ptr @y, align 4 +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_ELSE]]: +; CHECK-NEXT: store i32 1, ptr @z, align 4 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: tail call void @bar(i32 [[DOTPRE]]) +; CHECK-NEXT: ret void +; entry: %t = sub i32 %x, %x %.pre = load i32, ptr @y, align 4 diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll index ff80328..6ceec95 100644 --- a/llvm/test/Transforms/GVN/cond_br2.ll +++ b/llvm/test/Transforms/GVN/cond_br2.ll @@ -1,4 +1,6 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s + target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" %"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl", [1 x %"union.llvm::SmallVectorBase::U"] } @@ -10,10 +12,77 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; Function Attrs: ssp uwtable define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 { -; CHECK: @_Z4testv() -; CHECK: invoke.cont: -; CHECK: br i1 true, label %new.notnull.i11, label %if.end.i14 -; CHECK: Retry.i10: +; CHECK-LABEL: define void @_Z4testv( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[SV:%.*]] = alloca %"class.llvm::SmallVector", align 16 +; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SV]]) #[[ATTR4:[0-9]+]] +; CHECK-NEXT: [[FIRSTEL_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 
0, i32 0, i32 3 +; CHECK-NEXT: store ptr [[FIRSTEL_I_I_I_I_I_I]], ptr [[SV]], align 16, !tbaa [[ANYPTR_TBAA0:![0-9]+]] +; CHECK-NEXT: [[ENDX_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 1 +; CHECK-NEXT: store ptr [[FIRSTEL_I_I_I_I_I_I]], ptr [[ENDX_I_I_I_I_I_I]], align 8, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: [[CAPACITYX_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 2 +; CHECK-NEXT: [[ADD_PTR_I_I_I_I2_I_I:%.*]] = getelementptr inbounds %"union.llvm::SmallVectorBase::U", ptr [[FIRSTEL_I_I_I_I_I_I]], i64 2 +; CHECK-NEXT: store ptr [[ADD_PTR_I_I_I_I2_I_I]], ptr [[CAPACITYX_I_I_I_I_I_I]], align 16, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: br i1 true, label %[[RETRY_I:.*]], label %[[IF_END_I:.*]] +; CHECK: [[RETRY_I]]: +; CHECK-NEXT: br i1 false, label %[[RETRY_I_INVOKE_CONT_CRIT_EDGE:.*]], label %[[NEW_NOTNULL_I:.*]] +; CHECK: [[RETRY_I_INVOKE_CONT_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[INVOKE_CONT:.*]] +; CHECK: [[NEW_NOTNULL_I]]: +; CHECK-NEXT: store i32 1, ptr [[FIRSTEL_I_I_I_I_I_I]], align 4, !tbaa [[INT_TBAA4:![0-9]+]] +; CHECK-NEXT: br label %[[INVOKE_CONT]] +; CHECK: [[IF_END_I]]: +; CHECK-NEXT: invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr [[SV]], i64 0, i64 4) +; CHECK-NEXT: to [[DOTNOEXC:label %.*]] unwind label %[[LPAD:.*]] +; CHECK: [[_NOEXC:.*:]] +; CHECK-NEXT: [[DOTPRE_I:%.*]] = load ptr, ptr [[ENDX_I_I_I_I_I_I]], align 8, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: br label %[[RETRY_I]] +; CHECK: [[INVOKE_CONT]]: +; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr inbounds i8, ptr [[FIRSTEL_I_I_I_I_I_I]], i64 4 +; CHECK-NEXT: store ptr [[ADD_PTR_I]], ptr [[ENDX_I_I_I_I_I_I]], align 8, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: br i1 true, label %[[NEW_NOTNULL_I11:.*]], label %[[IF_END_I14:.*]] +; CHECK: [[RETRY_I10:.*]]: +; CHECK-NEXT: [[DOTPRE_I13:%.*]] = load ptr, ptr [[ENDX_I_I_I_I_I_I]], align 8, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: [[NEW_ISNULL_I9:%.*]] = icmp eq ptr [[DOTPRE_I13]], null +; CHECK-NEXT: br i1 [[NEW_ISNULL_I9]], label %[[RETRY_I10_INVOKE_CONT2_CRIT_EDGE:.*]], label %[[RETRY_I10_NEW_NOTNULL_I11_CRIT_EDGE:.*]] +; CHECK: [[RETRY_I10_NEW_NOTNULL_I11_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[NEW_NOTNULL_I11]] +; CHECK: [[RETRY_I10_INVOKE_CONT2_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[INVOKE_CONT2:.*]] +; CHECK: [[NEW_NOTNULL_I11]]: +; CHECK-NEXT: store i32 2, ptr [[ADD_PTR_I]], align 4, !tbaa [[INT_TBAA4]] +; CHECK-NEXT: br label %[[INVOKE_CONT2]] +; CHECK: [[IF_END_I14]]: +; CHECK-NEXT: invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr [[SV]], i64 0, i64 4) +; CHECK-NEXT: to label %[[RETRY_I10]] unwind label %[[LPAD]] +; CHECK: [[INVOKE_CONT2]]: +; CHECK-NEXT: [[ADD_PTR_I12:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_I]], i64 4 +; CHECK-NEXT: store ptr [[ADD_PTR_I12]], ptr [[ENDX_I_I_I_I_I_I]], align 8, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: invoke void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr [[SV]]) +; CHECK-NEXT: to label %[[INVOKE_CONT3:.*]] unwind label %[[LPAD]] +; CHECK: [[INVOKE_CONT3]]: +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SV]], align 16, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: [[CMP_I_I_I_I19:%.*]] = icmp eq ptr [[TMP0]], [[FIRSTEL_I_I_I_I_I_I]] +; CHECK-NEXT: br i1 [[CMP_I_I_I_I19]], label %[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21:.*]], label %[[IF_THEN_I_I_I20:.*]] +; CHECK: [[IF_THEN_I_I_I20]]: +; CHECK-NEXT: call void @free(ptr [[TMP0]]) #[[ATTR4]] +; CHECK-NEXT: br label 
%[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]] +; CHECK: [[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]]: +; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SV]]) #[[ATTR4]] +; CHECK-NEXT: ret void +; CHECK: [[LPAD]]: +; CHECK-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[SV]], align 16, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: [[CMP_I_I_I_I:%.*]] = icmp eq ptr [[TMP2]], [[FIRSTEL_I_I_I_I_I_I]] +; CHECK-NEXT: br i1 [[CMP_I_I_I_I]], label %[[EH_RESUME:.*]], label %[[IF_THEN_I_I_I:.*]] +; CHECK: [[IF_THEN_I_I_I]]: +; CHECK-NEXT: call void @free(ptr [[TMP2]]) #[[ATTR4]] +; CHECK-NEXT: br label %[[EH_RESUME]] +; CHECK: [[EH_RESUME]]: +; CHECK-NEXT: resume { ptr, i32 } [[TMP1]] +; entry: %sv = alloca %"class.llvm::SmallVector", align 16 @@ -42,7 +111,7 @@ new.notnull.i: ; preds = %Retry.i if.end.i: ; preds = %entry invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr %sv, i64 0, i64 4) - to label %.noexc unwind label %lpad + to label %.noexc unwind label %lpad .noexc: ; preds = %if.end.i %.pre.i = load ptr, ptr %EndX.i, align 8, !tbaa !4 @@ -67,14 +136,14 @@ new.notnull.i11: ; preds = %invoke.cont, %Retry if.end.i14: ; preds = %invoke.cont invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr %sv, i64 0, i64 4) - to label %Retry.i10 unwind label %lpad + to label %Retry.i10 unwind label %lpad invoke.cont2: ; preds = %new.notnull.i11, %Retry.i10 %4 = phi ptr [ null, %Retry.i10 ], [ %3, %new.notnull.i11 ] %add.ptr.i12 = getelementptr inbounds i8, ptr %4, i64 4 store ptr %add.ptr.i12, ptr %EndX.i, align 8, !tbaa !4 invoke void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr %sv) - to label %invoke.cont3 unwind label %lpad + to label %invoke.cont3 unwind label %lpad invoke.cont3: ; preds = %invoke.cont2 %5 = load ptr, ptr %sv, align 16, !tbaa !4 @@ -91,7 +160,7 @@ _ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.t lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2 %6 = landingpad { ptr, i32 } - cleanup + cleanup %7 = load ptr, ptr %sv, align 16, !tbaa !4 %cmp.i.i.i.i = icmp eq ptr %7, %FirstEl.i.i.i.i.i.i br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i @@ -130,3 +199,11 @@ attributes #3 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "n !3 = !{!"int", !1} !4 = !{!0, !0, i64 0} !5 = !{!3, !3, i64 0} +;. +; CHECK: [[ANYPTR_TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0} +; CHECK: [[META1]] = !{!"any pointer", [[META2:![0-9]+]]} +; CHECK: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]} +; CHECK: [[META3]] = !{!"Simple C/C++ TBAA"} +; CHECK: [[INT_TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0} +; CHECK: [[META5]] = !{!"int", [[META2]]} +;. 
diff --git a/llvm/test/Transforms/GVN/crash-no-aa.ll b/llvm/test/Transforms/GVN/crash-no-aa.ll index 10e6374..f396c10 100644 --- a/llvm/test/Transforms/GVN/crash-no-aa.ll +++ b/llvm/test/Transforms/GVN/crash-no-aa.ll @@ -1,10 +1,19 @@ -; RUN: opt -disable-basic-aa -passes=gvn -S < %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -disable-basic-aa -passes=gvn -S -o - < %s | FileCheck %s + +; PR5744 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-unknown-freebsd8.0" -; PR5744 define i32 @test1(ptr %P) { +; CHECK-LABEL: define i32 @test1( +; CHECK-SAME: ptr [[P:%.*]]) { +; CHECK-NEXT: store i16 42, ptr [[P]], align 2 +; CHECK-NEXT: [[P3:%.*]] = getelementptr { i16, i32 }, ptr [[P]], i32 0, i32 1 +; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[P3]], align 4 +; CHECK-NEXT: ret i32 [[V]] +; %P2 = getelementptr {i16, i32}, ptr %P, i32 0, i32 0 store i16 42, ptr %P2 diff --git a/llvm/test/Transforms/GVN/critical-edge-split-failure.ll b/llvm/test/Transforms/GVN/critical-edge-split-failure.ll index 8eac5fe..40ebe14 100644 --- a/llvm/test/Transforms/GVN/critical-edge-split-failure.ll +++ b/llvm/test/Transforms/GVN/critical-edge-split-failure.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S -o - %s | FileCheck %s %struct.sk_buff = type opaque @@ -10,6 +11,31 @@ declare void @llvm.assume(i1 noundef) define dso_local void @l2tp_recv_dequeue() local_unnamed_addr { +; CHECK-LABEL: define dso_local void @l2tp_recv_dequeue() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @l2tp_recv_dequeue_session, align 4 +; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = inttoptr i64 [[CONV]] to ptr +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @l2tp_recv_dequeue_session_2, align 4 +; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP2]], 0 +; CHECK-NEXT: br label %[[FOR_COND:.*]] +; CHECK: [[FOR_COND]]: +; CHECK-NEXT: [[STOREMERGE:%.*]] = phi ptr [ [[TMP1]], %[[ENTRY]] ], [ null, %[[IF_END:.*]] ] +; CHECK-NEXT: store ptr [[STOREMERGE]], ptr @l2tp_recv_dequeue_skb, align 8 +; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[IF_END]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[STOREMERGE]], align 4 +; CHECK-NEXT: store i32 [[TMP3]], ptr @l2tp_recv_dequeue_session_0, align 4 +; CHECK-NEXT: callbr void asm sideeffect "", "!i,~{dirflag},~{fpsr},~{flags}"() +; CHECK-NEXT: to label %[[ASM_FALLTHROUGH_I:.*]] [label %if.end] +; CHECK: [[ASM_FALLTHROUGH_I]]: +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[STOREMERGE]], align 4 +; CHECK-NEXT: [[TOBOOL2_NOT:%.*]] = icmp eq i32 [[TMP4]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[TOBOOL2_NOT]]) +; CHECK-NEXT: br label %[[FOR_COND]] +; entry: %0 = load i32, ptr @l2tp_recv_dequeue_session, align 4 %conv = sext i32 %0 to i64 @@ -29,10 +55,8 @@ if.then: ; preds = %for.cond ; Splitting the critical edge from if.then to if.end will fail, but should not ; cause an infinite loop in GVN. If we can one day split edges of callbr ; indirect targets, great! 
-; CHECK: callbr void asm sideeffect "", "!i,~{dirflag},~{fpsr},~{flags}"() -; CHECK-NEXT: to label %asm.fallthrough.i [label %if.end] callbr void asm sideeffect "", "!i,~{dirflag},~{fpsr},~{flags}"() - to label %asm.fallthrough.i [label %if.end] + to label %asm.fallthrough.i [label %if.end] asm.fallthrough.i: ; preds = %if.then br label %if.end @@ -43,4 +67,3 @@ if.end: ; preds = %asm.fallthrough.i, tail call void @llvm.assume(i1 %tobool2.not) br label %for.cond } - diff --git a/llvm/test/Transforms/GVN/dbg-redundant-load.ll b/llvm/test/Transforms/GVN/dbg-redundant-load.ll index 1ba4e8b..094467e 100644 --- a/llvm/test/Transforms/GVN/dbg-redundant-load.ll +++ b/llvm/test/Transforms/GVN/dbg-redundant-load.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s ; Check that the redundant load from %if.then is removed. @@ -6,15 +7,21 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" -; CHECK: @test_redundant_load( -; CHECK-LABEL: entry: -; CHECK-NEXT: load i32, ptr %Y, align 4, !dbg ![[LOC:[0-9]+]] -; CHECK-LABEL: if.then: -; CHECK-NOT: load -; CHECK-LABEL: if.end: -; CHECK: ![[LOC]] = !DILocation(line: 3, scope: !{{.*}}) - define i32 @test_redundant_load(i32 %X, ptr %Y) !dbg !6 { +; CHECK-LABEL: define i32 @test_redundant_load( +; CHECK-SAME: i32 [[X:%.*]], ptr [[Y:%.*]]) !dbg [[DBG6:![0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4, !dbg [[DBG8:![0-9]+]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], -1, !dbg [[DBG9:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]], !dbg [[DBG9]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP0]], !dbg [[DBG10:![0-9]+]] +; CHECK-NEXT: call void @foo(), !dbg [[DBG11:![0-9]+]] +; CHECK-NEXT: br label %[[IF_END]], !dbg [[DBG12:![0-9]+]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[RESULT_0:%.*]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[TMP0]], %[[ENTRY]] ] +; CHECK-NEXT: ret i32 [[RESULT_0]], !dbg [[DBG13:![0-9]+]] +; entry: %0 = load i32, ptr %Y, align 4, !dbg !8 %cmp = icmp sgt i32 %X, -1, !dbg !9 @@ -50,3 +57,16 @@ declare void @foo() !11 = !DILocation(line: 7, scope: !6) !12 = !DILocation(line: 8, scope: !6) !13 = !DILocation(line: 10, scope: !6) +;. +; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: [[META1:![0-9]+]], isOptimized: false, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: [[META2:![0-9]+]]) +; CHECK: [[META1]] = !DIFile(filename: "test.cpp", directory: "") +; CHECK: [[META2]] = !{} +; CHECK: [[DBG6]] = distinct !DISubprogram(name: "test_redundant_load", scope: [[META1]], file: [[META1]], line: 2, type: [[META7:![0-9]+]], scopeLine: 2, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META0]], retainedNodes: [[META2]]) +; CHECK: [[META7]] = !DISubroutineType(types: [[META2]]) +; CHECK: [[DBG8]] = !DILocation(line: 3, scope: [[DBG6]]) +; CHECK: [[DBG9]] = !DILocation(line: 5, scope: [[DBG6]]) +; CHECK: [[DBG10]] = !DILocation(line: 6, scope: [[DBG6]]) +; CHECK: [[DBG11]] = !DILocation(line: 7, scope: [[DBG6]]) +; CHECK: [[DBG12]] = !DILocation(line: 8, scope: [[DBG6]]) +; CHECK: [[DBG13]] = !DILocation(line: 10, scope: [[DBG6]]) +;. 
diff --git a/llvm/test/Transforms/GVN/fake-use-constprop.ll b/llvm/test/Transforms/GVN/fake-use-constprop.ll index 0e7ca10..85b7dc3 100644 --- a/llvm/test/Transforms/GVN/fake-use-constprop.ll +++ b/llvm/test/Transforms/GVN/fake-use-constprop.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s ; ; The Global Value Numbering pass (GVN) propagates boolean values @@ -33,11 +34,20 @@ ;; GVN should propagate a constant value through to a regular call, but not to ;; a fake use, which should continue to track the original value. -; CHECK: %[[CONV_VAR:[a-zA-Z0-9]+]] = fptosi -; CHECK: call {{.+}} @bees(i8 0) -; CHECK: call {{.+}} @llvm.fake.use(i8 %[[CONV_VAR]]) define i32 @foo(float %f) optdebug { +; CHECK-LABEL: define i32 @foo( +; CHECK-SAME: float [[F:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[F]] to i8 +; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp eq i8 [[CONV]], 0 +; CHECK-NEXT: br i1 [[TOBOOL3]], label %[[IF_END:.*]], label %[[LAB:.*]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: tail call void (...) @bees(i8 0) +; CHECK-NEXT: tail call void (...) @llvm.fake.use(i8 [[CONV]]) +; CHECK-NEXT: br label %[[LAB]] +; CHECK: [[LAB]]: +; CHECK-NEXT: ret i32 1 +; %conv = fptosi float %f to i8 %tobool3 = icmp eq i8 %conv, 0 br i1 %tobool3, label %if.end, label %lab diff --git a/llvm/test/Transforms/GVN/flags.ll b/llvm/test/Transforms/GVN/flags.ll index 2e5aeed..3777e14 100644 --- a/llvm/test/Transforms/GVN/flags.ll +++ b/llvm/test/Transforms/GVN/flags.ll @@ -1,8 +1,17 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s declare void @use(i1) define void @test1(float %x, float %y) { +; CHECK-LABEL: define void @test1( +; CHECK-SAME: float [[X:%.*]], float [[Y:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CMP1:%.*]] = fcmp oeq float [[Y]], [[X]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: ret void +; entry: %cmp1 = fcmp nnan oeq float %y, %x %cmp2 = fcmp oeq float %x, %y @@ -10,9 +19,3 @@ entry: call void @use(i1 %cmp2) ret void } - -; CHECK-LABEL: define void @test1( -; CHECK: %[[cmp:.*]] = fcmp oeq float %y, %x -; CHECK-NEXT: call void @use(i1 %[[cmp]]) -; CHECK-NEXT: call void @use(i1 %[[cmp]]) -; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/GVN/fold-const-expr.ll b/llvm/test/Transforms/GVN/fold-const-expr.ll index 9e1129e..edbfcda 100644 --- a/llvm/test/Transforms/GVN/fold-const-expr.ll +++ b/llvm/test/Transforms/GVN/fold-const-expr.ll @@ -1,12 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S < %s | FileCheck %s + ; GVN failed to do constant expression folding and expanded ; them unfolded in many places, producing exponentially large const ; expressions. As a result, the compilation never fisished. 
; This test checks that we are folding constant expression ; PR 28418 -; RUN: opt -passes=gvn -S < %s | FileCheck %s %2 = type { i32, i32, i32, i32, i32 } define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @_Z16vector3util_mainv( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = alloca [[TMP0:%.*]], align 4 +; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds [[TMP0]], ptr [[TMP1]], i64 0, i32 1 +; CHECK-NEXT: store <4 x i32> <i32 234567891, i32 345678912, i32 456789123, i32 0>, ptr [[TMP114]], align 4 +; CHECK-NEXT: store i32 310393545, ptr [[TMP114]], align 4 +; CHECK-NEXT: store i32 -383584258, ptr [[TMP114]], align 4 +; CHECK-NEXT: store i32 -57163022, ptr [[TMP114]], align 4 +; CHECK-NEXT: ret i32 0 +; %tmp1 = alloca %2, align 4 %tmp114 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1 store <4 x i32> <i32 234567891, i32 345678912, i32 456789123, i32 0>, ptr %tmp114, align 4 @@ -37,7 +49,6 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) { %tmp1739 = shl i32 %tmp1738, 22 %tmp1740 = xor i32 %tmp1739, %tmp1738 store i32 %tmp1740, ptr %tmp1683, align 4 -; CHECK: store i32 310393545, ptr %tmp114, align 4 %tmp1756 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1 %tmp1761 = load i32, ptr %tmp1756, align 4 %tmp1766 = shl i32 %tmp1761, 5 @@ -65,7 +76,6 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) { %tmp1812 = shl i32 %tmp1811, 22 %tmp1813 = xor i32 %tmp1812, %tmp1811 store i32 %tmp1813, ptr %tmp1756, align 4 -; CHECK: store i32 -383584258, ptr %tmp114, align 4 %tmp2645 = getelementptr inbounds %2, ptr %tmp1, i64 0, i32 1 %tmp2650 = load i32, ptr %tmp2645, align 4 %tmp2655 = shl i32 %tmp2650, 5 @@ -93,6 +103,5 @@ define i32 @_Z16vector3util_mainv(i32 %x, i32 %y) { %tmp2701 = shl i32 %tmp2700, 22 %tmp2702 = xor i32 %tmp2701, %tmp2700 store i32 %tmp2702, ptr %tmp2645, align 4 -; CHECK: store i32 -57163022, ptr %tmp114, align 4 ret i32 0 } diff --git a/llvm/test/Transforms/GVN/fpmath.ll b/llvm/test/Transforms/GVN/fpmath.ll index 970dd89..2069faa 100644 --- a/llvm/test/Transforms/GVN/fpmath.ll +++ b/llvm/test/Transforms/GVN/fpmath.ll @@ -1,10 +1,13 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s define double @test1(double %x, double %y) { -; CHECK: @test1(double %x, double %y) -; CHECK: %add1 = fadd double %x, %y -; CHECK-NOT: fpmath -; CHECK: %foo = fadd double %add1, %add1 +; CHECK-LABEL: define double @test1( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret double [[FOO]] +; %add1 = fadd double %x, %y, !fpmath !0 %add2 = fadd double %x, %y %foo = fadd double %add1, %add2 @@ -12,9 +15,12 @@ define double @test1(double %x, double %y) { } define double @test2(double %x, double %y) { -; CHECK: @test2(double %x, double %y) -; CHECK: %add1 = fadd double %x, %y, !fpmath !0 -; CHECK: %foo = fadd double %add1, %add1 +; CHECK-LABEL: define double @test2( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META0:![0-9]+]] +; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret double [[FOO]] +; %add1 = fadd double %x, %y, !fpmath !0 %add2 = fadd double %x, %y, !fpmath !0 %foo = fadd double %add1, %add2 @@ -22,9 +28,12 @@ define double @test2(double %x, double %y) { } define double @test3(double %x, double %y) 
{ -; CHECK: @test3(double %x, double %y) -; CHECK: %add1 = fadd double %x, %y, !fpmath !1 -; CHECK: %foo = fadd double %add1, %add1 +; CHECK-LABEL: define double @test3( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META1:![0-9]+]] +; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret double [[FOO]] +; %add1 = fadd double %x, %y, !fpmath !1 %add2 = fadd double %x, %y, !fpmath !0 %foo = fadd double %add1, %add2 @@ -32,9 +41,12 @@ define double @test3(double %x, double %y) { } define double @test4(double %x, double %y) { -; CHECK: @test4(double %x, double %y) -; CHECK: %add1 = fadd double %x, %y, !fpmath !1 -; CHECK: %foo = fadd double %add1, %add1 +; CHECK-LABEL: define double @test4( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[X]], [[Y]], !fpmath [[META1]] +; CHECK-NEXT: [[FOO:%.*]] = fadd double [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret double [[FOO]] +; %add1 = fadd double %x, %y, !fpmath !0 %add2 = fadd double %x, %y, !fpmath !1 %foo = fadd double %add1, %add2 @@ -42,9 +54,12 @@ define double @test4(double %x, double %y) { } define double @test5(double %x, double %y) { -; CHECK: @test5(double %x, double %y) -; CHECK: %neg1 = fneg double %x, !fpmath !1 -; CHECK: %foo = fadd double %neg1, %neg1 +; CHECK-LABEL: define double @test5( +; CHECK-SAME: double [[X:%.*]], double [[Y:%.*]]) { +; CHECK-NEXT: [[NEG1:%.*]] = fneg double [[X]], !fpmath [[META1]] +; CHECK-NEXT: [[FOO:%.*]] = fadd double [[NEG1]], [[NEG1]] +; CHECK-NEXT: ret double [[FOO]] +; %neg1 = fneg double %x, !fpmath !0 %neg2 = fneg double %x, !fpmath !1 %foo = fadd double %neg1, %neg2 @@ -53,3 +68,7 @@ define double @test5(double %x, double %y) { !0 = !{ float 5.0 } !1 = !{ float 2.5 } +;. +; CHECK: [[META0]] = !{float 5.000000e+00} +; CHECK: [[META1]] = !{float 2.500000e+00} +;. 
diff --git a/llvm/test/Transforms/GVN/funclet.ll b/llvm/test/Transforms/GVN/funclet.ll index 8ef4c96..34ed78f 100644 --- a/llvm/test/Transforms/GVN/funclet.ll +++ b/llvm/test/Transforms/GVN/funclet.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32" target triple = "i686-pc-windows-msvc" @@ -8,13 +9,35 @@ target triple = "i686-pc-windows-msvc" @"_TI1?AUA@@" = external constant %eh.ThrowInfo define i8 @f() personality ptr @__CxxFrameHandler3 { +; CHECK-LABEL: define i8 @f() personality ptr @__CxxFrameHandler3 { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[B:%.*]] = alloca i8, align 1 +; CHECK-NEXT: [[C:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[B]], align 1 +; CHECK-NEXT: store i8 13, ptr [[C]], align 1 +; CHECK-NEXT: invoke void @_CxxThrowException(ptr [[B]], ptr nonnull @"_TI1?AUA@@") +; CHECK-NEXT: to label %[[UNREACHABLE:.*]] unwind label %[[CATCH_DISPATCH:.*]] +; CHECK: [[CATCH_DISPATCH]]: +; CHECK-NEXT: [[CS1:%.*]] = catchswitch within none [label %catch] unwind to caller +; CHECK: [[CATCH:.*:]] +; CHECK-NEXT: [[CATCHPAD:%.*]] = catchpad within [[CS1]] [ptr null, i32 64, ptr null] +; CHECK-NEXT: store i8 5, ptr [[B]], align 1 +; CHECK-NEXT: catchret from [[CATCHPAD]] to label %[[TRY_CONT:.*]] +; CHECK: [[TRY_CONT]]: +; CHECK-NEXT: [[LOAD_B:%.*]] = load i8, ptr [[B]], align 1 +; CHECK-NEXT: [[LOAD_C:%.*]] = load i8, ptr [[C]], align 1 +; CHECK-NEXT: [[ADD:%.*]] = add i8 [[LOAD_B]], [[LOAD_C]] +; CHECK-NEXT: ret i8 [[ADD]] +; CHECK: [[UNREACHABLE]]: +; CHECK-NEXT: unreachable +; entry: %b = alloca i8 %c = alloca i8 store i8 42, ptr %b store i8 13, ptr %c invoke void @_CxxThrowException(ptr %b, ptr nonnull @"_TI1?AUA@@") - to label %unreachable unwind label %catch.dispatch + to label %unreachable unwind label %catch.dispatch catch.dispatch: ; preds = %entry %cs1 = catchswitch within none [label %catch] unwind to caller @@ -33,11 +56,6 @@ try.cont: ; preds = %catch unreachable: ; preds = %entry unreachable } -; CHECK-LABEL: define i8 @f( -; CHECK: %[[load_b:.*]] = load i8, ptr %b -; CHECK-NEXT: %[[load_c:.*]] = load i8, ptr %c -; CHECK-NEXT: %[[add:.*]] = add i8 %[[load_b]], %[[load_c]] -; CHECK-NEXT: ret i8 %[[add]] declare i32 @__CxxFrameHandler3(...) diff --git a/llvm/test/Transforms/GVN/int_sideeffect.ll b/llvm/test/Transforms/GVN/int_sideeffect.ll index 513533a..8754cc0 100644 --- a/llvm/test/Transforms/GVN/int_sideeffect.ll +++ b/llvm/test/Transforms/GVN/int_sideeffect.ll @@ -1,38 +1,56 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S < %s -passes=gvn | FileCheck %s declare void @llvm.sideeffect() ; Store-to-load forwarding across a @llvm.sideeffect. - -; CHECK-LABEL: s2l -; CHECK-NOT: load define float @s2l(ptr %p) { - store float 0.0, ptr %p - call void @llvm.sideeffect() - %t = load float, ptr %p - ret float %t +; CHECK-LABEL: define float @s2l( +; CHECK-SAME: ptr [[P:%.*]]) { +; CHECK-NEXT: store float 0.000000e+00, ptr [[P]], align 4 +; CHECK-NEXT: call void @llvm.sideeffect() +; CHECK-NEXT: ret float 0.000000e+00 +; + store float 0.0, ptr %p + call void @llvm.sideeffect() + %t = load float, ptr %p + ret float %t } ; Redundant load elimination across a @llvm.sideeffect. 
- -; CHECK-LABEL: rle -; CHECK: load -; CHECK-NOT: load define float @rle(ptr %p) { - %r = load float, ptr %p - call void @llvm.sideeffect() - %s = load float, ptr %p - %t = fadd float %r, %s - ret float %t +; CHECK-LABEL: define float @rle( +; CHECK-SAME: ptr [[P:%.*]]) { +; CHECK-NEXT: [[R:%.*]] = load float, ptr [[P]], align 4 +; CHECK-NEXT: call void @llvm.sideeffect() +; CHECK-NEXT: [[T:%.*]] = fadd float [[R]], [[R]] +; CHECK-NEXT: ret float [[T]] +; + %r = load float, ptr %p + call void @llvm.sideeffect() + %s = load float, ptr %p + %t = fadd float %r, %s + ret float %t } ; LICM across a @llvm.sideeffect. - -; CHECK-LABEL: licm -; CHECK: load -; CHECK: loop: -; CHECK-NOT: load define float @licm(i64 %n, ptr nocapture readonly %p) #0 { +; CHECK-LABEL: define float @licm( +; CHECK-SAME: i64 [[N:%.*]], ptr readonly captures(none) [[P:%.*]]) { +; CHECK-NEXT: [[BB0:.*]]: +; CHECK-NEXT: [[T3_PRE:%.*]] = load float, ptr [[P]], align 4 +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, %[[BB0]] ], [ [[T5:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, %[[BB0]] ], [ [[T4:%.*]], %[[LOOP]] ] +; CHECK-NEXT: call void @llvm.sideeffect() +; CHECK-NEXT: [[T4]] = fadd float [[SUM]], [[T3_PRE]] +; CHECK-NEXT: [[T5]] = add i64 [[I]], 1 +; CHECK-NEXT: [[T6:%.*]] = icmp ult i64 [[T5]], [[N]] +; CHECK-NEXT: br i1 [[T6]], label %[[LOOP]], label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: ret float [[T4]] +; bb0: br label %loop diff --git a/llvm/test/Transforms/GVN/invariant.group.ll b/llvm/test/Transforms/GVN/invariant.group.ll index 9c673ba..aba20ee 100644 --- a/llvm/test/Transforms/GVN/invariant.group.ll +++ b/llvm/test/Transforms/GVN/invariant.group.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s %struct.A = type { ptr } @@ -6,130 +7,175 @@ @unknownPtr = external global i8 -; CHECK-LABEL: define i8 @simple() { define i8 @simple() { +; CHECK-LABEL: define i8 @simple() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0:![0-9]+]] +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - call void @foo(ptr %ptr) - - %a = load i8, ptr %ptr, !invariant.group !0 - %b = load i8, ptr %ptr, !invariant.group !0 - %c = load i8, ptr %ptr, !invariant.group !0 -; CHECK: ret i8 42 - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) + + %a = load i8, ptr %ptr, !invariant.group !0 + %b = load i8, ptr %ptr, !invariant.group !0 + %c = load i8, ptr %ptr, !invariant.group !0 + ret i8 %a } -; CHECK-LABEL: define i8 @optimizable1() { define i8 @optimizable1() { +; CHECK-LABEL: define i8 @optimizable1() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]]) +; CHECK-NEXT: call void @foo(ptr [[PTR2]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) - %a = load i8, ptr %ptr, !invariant.group !0 - - call void @foo(ptr %ptr2); call to use %ptr2 -; CHECK: ret i8 42 - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr 
%ptr, !invariant.group !0 + %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) + %a = load i8, ptr %ptr, !invariant.group !0 + + call void @foo(ptr %ptr2); call to use %ptr2 + ret i8 %a } -; CHECK-LABEL: define i8 @optimizable2() { define i8 @optimizable2() { +; CHECK-LABEL: define i8 @optimizable2() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: store i8 13, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @bar(i8 13) +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - call void @foo(ptr %ptr) - - store i8 13, ptr %ptr ; can't use this store with invariant.group - %a = load i8, ptr %ptr - call void @bar(i8 %a) ; call to use %a - - call void @foo(ptr %ptr) - %b = load i8, ptr %ptr, !invariant.group !0 - -; CHECK: ret i8 42 - ret i8 %b + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) + + store i8 13, ptr %ptr ; can't use this store with invariant.group + %a = load i8, ptr %ptr + call void @bar(i8 %a) ; call to use %a + + call void @foo(ptr %ptr) + %b = load i8, ptr %ptr, !invariant.group !0 + + ret i8 %b } -; CHECK-LABEL: define i1 @proveEqualityForStrip( -define i1 @proveEqualityForStrip(ptr %a) { ; FIXME: The first call could be also removed by GVN. Right now ; DCE removes it. The second call is CSE'd with the first one. -; CHECK: %b1 = call ptr @llvm.strip.invariant.group.p0(ptr %a) +define i1 @proveEqualityForStrip(ptr %a) { +; CHECK-LABEL: define i1 @proveEqualityForStrip( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[B1:%.*]] = call ptr @llvm.strip.invariant.group.p0(ptr [[A]]) +; CHECK-NEXT: ret i1 true +; %b1 = call ptr @llvm.strip.invariant.group.p0(ptr %a) -; CHECK-NOT: llvm.strip.invariant.group %b2 = call ptr @llvm.strip.invariant.group.p0(ptr %a) %r = icmp eq ptr %b1, %b2 -; CHECK: ret i1 true ret i1 %r } -; CHECK-LABEL: define i8 @unoptimizable1() { + define i8 @unoptimizable1() { +; CHECK-LABEL: define i8 @unoptimizable1() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: ret i8 [[A]] +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr - call void @foo(ptr %ptr) - %a = load i8, ptr %ptr, !invariant.group !0 -; CHECK: ret i8 %a - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr %ptr + call void @foo(ptr %ptr) + %a = load i8, ptr %ptr, !invariant.group !0 + ret i8 %a } -; CHECK-LABEL: define void @indirectLoads() { define void @indirectLoads() { +; CHECK-LABEL: define void @indirectLoads() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null) +; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]]) +; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]] +; CHECK-NEXT: [[CMP_VTABLES:%.*]] = icmp eq ptr [[VTABLE]], getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2) +; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_VTABLES]]) +; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8 +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr [[CALL]]) +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr [[CALL]]) +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr [[CALL]]) +; 
CHECK-NEXT: call void @_ZN1A3fooEv(ptr [[CALL]]) +; CHECK-NEXT: ret void +; entry: %a = alloca ptr, align 8 - - %call = call ptr @getPointer(ptr null) + + %call = call ptr @getPointer(ptr null) call void @_ZN1AC1Ev(ptr %call) - -; CHECK: %vtable = load {{.*}} !invariant.group + %vtable = load ptr, ptr %call, align 8, !invariant.group !0 %cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2) call void @llvm.assume(i1 %cmp.vtables) - + store ptr %call, ptr %a, align 8 %0 = load ptr, ptr %a, align 8 -; CHECK: call void @_ZN1A3fooEv( %vtable1 = load ptr, ptr %0, align 8, !invariant.group !0 %1 = load ptr, ptr %vtable1, align 8 call void %1(ptr %0) %2 = load ptr, ptr %a, align 8 -; CHECK: call void @_ZN1A3fooEv( %vtable2 = load ptr, ptr %2, align 8, !invariant.group !0 %3 = load ptr, ptr %vtable2, align 8 - + call void %3(ptr %2) %4 = load ptr, ptr %a, align 8 - + %vtable4 = load ptr, ptr %4, align 8, !invariant.group !0 %5 = load ptr, ptr %vtable4, align 8 -; CHECK: call void @_ZN1A3fooEv( call void %5(ptr %4) - + %vtable5 = load ptr, ptr %call, align 8, !invariant.group !0 %6 = load ptr, ptr %vtable5, align 8 -; CHECK: call void @_ZN1A3fooEv( call void %6(ptr %4) - + ret void } -; CHECK-LABEL: define void @combiningBitCastWithLoad() { define void @combiningBitCastWithLoad() { +; CHECK-LABEL: define void @combiningBitCastWithLoad() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[CALL:%.*]] = call ptr @getPointer(ptr null) +; CHECK-NEXT: call void @_ZN1AC1Ev(ptr [[CALL]]) +; CHECK-NEXT: [[VTABLE:%.*]] = load ptr, ptr [[CALL]], align 8, !invariant.group [[META0]] +; CHECK-NEXT: [[CMP_VTABLES:%.*]] = icmp eq ptr [[VTABLE]], getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2) +; CHECK-NEXT: store ptr [[CALL]], ptr [[A]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[VTABLE]], align 8 +; CHECK-NEXT: call void [[TMP0]](ptr [[CALL]]) +; CHECK-NEXT: ret void +; entry: %a = alloca ptr, align 8 - - %call = call ptr @getPointer(ptr null) + + %call = call ptr @getPointer(ptr null) call void @_ZN1AC1Ev(ptr %call) - -; CHECK: %vtable = load {{.*}} !invariant.group + %vtable = load ptr, ptr %call, align 8, !invariant.group !0 %cmp.vtables = icmp eq ptr %vtable, getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2) - + store ptr %call, ptr %a, align 8 -; CHECK-NOT: !invariant.group %0 = load ptr, ptr %a, align 8 %vtable1 = load ptr, ptr %0, align 8, !invariant.group !0 @@ -139,185 +185,255 @@ entry: ret void } -; CHECK-LABEL:define void @loadCombine() { define void @loadCombine() { +; CHECK-LABEL: define void @loadCombine() { +; CHECK-NEXT: [[ENTER:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[A]]) +; CHECK-NEXT: call void @bar(i8 [[A]]) +; CHECK-NEXT: ret void +; enter: %ptr = alloca i8 store i8 42, ptr %ptr call void @foo(ptr %ptr) -; CHECK: %[[A:.*]] = load i8, ptr %ptr, align 1, !invariant.group %a = load i8, ptr %ptr, !invariant.group !0 -; CHECK-NOT: load %b = load i8, ptr %ptr, !invariant.group !0 -; CHECK: call void @bar(i8 %[[A]]) call void @bar(i8 %a) -; CHECK: call void @bar(i8 %[[A]]) call void @bar(i8 %b) ret void } -; CHECK-LABEL: define void @loadCombine1() { define void @loadCombine1() { +; CHECK-LABEL: define void @loadCombine1() { +; CHECK-NEXT: [[ENTER:.*:]] +; 
CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[C]]) +; CHECK-NEXT: call void @bar(i8 [[C]]) +; CHECK-NEXT: ret void +; enter: %ptr = alloca i8 store i8 42, ptr %ptr call void @foo(ptr %ptr) -; CHECK: %[[D:.*]] = load i8, ptr %ptr, align 1, !invariant.group %c = load i8, ptr %ptr -; CHECK-NOT: load %d = load i8, ptr %ptr, !invariant.group !0 -; CHECK: call void @bar(i8 %[[D]]) call void @bar(i8 %c) -; CHECK: call void @bar(i8 %[[D]]) call void @bar(i8 %d) ret void } -; CHECK-LABEL: define void @loadCombine2() { define void @loadCombine2() { +; CHECK-LABEL: define void @loadCombine2() { +; CHECK-NEXT: [[ENTER:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[E]]) +; CHECK-NEXT: call void @bar(i8 [[E]]) +; CHECK-NEXT: ret void +; enter: %ptr = alloca i8 store i8 42, ptr %ptr call void @foo(ptr %ptr) -; CHECK: %[[E:.*]] = load i8, ptr %ptr, align 1, !invariant.group %e = load i8, ptr %ptr, !invariant.group !0 -; CHECK-NOT: load %f = load i8, ptr %ptr -; CHECK: call void @bar(i8 %[[E]]) call void @bar(i8 %e) -; CHECK: call void @bar(i8 %[[E]]) call void @bar(i8 %f) ret void } -; CHECK-LABEL: define void @loadCombine3() { define void @loadCombine3() { +; CHECK-LABEL: define void @loadCombine3() { +; CHECK-NEXT: [[ENTER:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[E:%.*]] = load i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[E]]) +; CHECK-NEXT: call void @bar(i8 [[E]]) +; CHECK-NEXT: ret void +; enter: %ptr = alloca i8 store i8 42, ptr %ptr call void @foo(ptr %ptr) -; CHECK: %[[E:.*]] = load i8, ptr %ptr, align 1, !invariant.group %e = load i8, ptr %ptr, !invariant.group !0 -; CHECK-NOT: load %f = load i8, ptr %ptr, !invariant.group !0 -; CHECK: call void @bar(i8 %[[E]]) call void @bar(i8 %e) -; CHECK: call void @bar(i8 %[[E]]) call void @bar(i8 %f) ret void } -; CHECK-LABEL: define i8 @unoptimizable2() { define i8 @unoptimizable2() { +; CHECK-LABEL: define i8 @unoptimizable2() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[A:%.*]] = load i8, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: ret i8 [[A]] +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr - call void @foo(ptr %ptr) - %a = load i8, ptr %ptr - call void @foo(ptr %ptr) - %b = load i8, ptr %ptr, !invariant.group !0 - -; CHECK: ret i8 %a - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr %ptr + call void @foo(ptr %ptr) + %a = load i8, ptr %ptr + call void @foo(ptr %ptr) + %b = load i8, ptr %ptr, !invariant.group !0 + + ret i8 %a } -; CHECK-LABEL: define i8 @unoptimizable3() { define i8 @unoptimizable3() { +; CHECK-LABEL: define i8 @unoptimizable3() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @getPointer(ptr [[PTR]]) +; CHECK-NEXT: [[A:%.*]] = 
load i8, ptr [[PTR2]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: ret i8 [[A]] +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - %ptr2 = call ptr @getPointer(ptr %ptr) - %a = load i8, ptr %ptr2, !invariant.group !0 - -; CHECK: ret i8 %a - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + %ptr2 = call ptr @getPointer(ptr %ptr) + %a = load i8, ptr %ptr2, !invariant.group !0 + + ret i8 %a } -; CHECK-LABEL: define i8 @optimizable4() { define i8 @optimizable4() { +; CHECK-LABEL: define i8 @optimizable4() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: [[PTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) -; CHECK-NOT: load - %a = load i8, ptr %ptr2, !invariant.group !0 - -; CHECK: ret i8 42 - ret i8 %a + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + %ptr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) + %a = load i8, ptr %ptr2, !invariant.group !0 + + ret i8 %a } -; CHECK-LABEL: define i8 @volatile1() { define i8 @volatile1() { +; CHECK-LABEL: define i8 @volatile1() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @bar(i8 [[B]]) +; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[C]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - call void @foo(ptr %ptr) - %a = load i8, ptr %ptr, !invariant.group !0 - %b = load volatile i8, ptr %ptr -; CHECK: call void @bar(i8 %b) - call void @bar(i8 %b) - - %c = load volatile i8, ptr %ptr, !invariant.group !0 + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) + %a = load i8, ptr %ptr, !invariant.group !0 + %b = load volatile i8, ptr %ptr + call void @bar(i8 %b) + + %c = load volatile i8, ptr %ptr, !invariant.group !0 ; FIXME: we could change %c to 42, preserving volatile load -; CHECK: call void @bar(i8 %c) - call void @bar(i8 %c) -; CHECK: ret i8 42 - ret i8 %a + call void @bar(i8 %c) + ret i8 %a } -; CHECK-LABEL: define i8 @volatile2() { define i8 @volatile2() { +; CHECK-LABEL: define i8 @volatile2() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: [[B:%.*]] = load volatile i8, ptr [[PTR]], align 1 +; CHECK-NEXT: call void @bar(i8 [[B]]) +; CHECK-NEXT: [[C:%.*]] = load volatile i8, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[C]]) +; CHECK-NEXT: ret i8 42 +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - call void @foo(ptr %ptr) - %a = load i8, ptr %ptr, !invariant.group !0 - %b = load volatile i8, ptr %ptr -; CHECK: call void @bar(i8 %b) - call void @bar(i8 %b) - - %c = load volatile i8, ptr %ptr, !invariant.group !0 + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) + %a = load i8, ptr %ptr, !invariant.group 
!0 + %b = load volatile i8, ptr %ptr + call void @bar(i8 %b) + + %c = load volatile i8, ptr %ptr, !invariant.group !0 ; FIXME: we could change %c to 42, preserving volatile load -; CHECK: call void @bar(i8 %c) - call void @bar(i8 %c) -; CHECK: ret i8 42 - ret i8 %a + call void @bar(i8 %c) + ret i8 %a } -; CHECK-LABEL: define i8 @fun() { define i8 @fun() { +; CHECK-LABEL: define i8 @fun() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[PTR:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 42, ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo(ptr [[PTR]]) +; CHECK-NEXT: call void @bar(i8 42) +; CHECK-NEXT: [[NEWPTR:%.*]] = call ptr @getPointer(ptr [[PTR]]) +; CHECK-NEXT: [[C:%.*]] = load i8, ptr [[NEWPTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[C]]) +; CHECK-NEXT: [[UNKNOWNVALUE:%.*]] = load i8, ptr @unknownPtr, align 1 +; CHECK-NEXT: store i8 [[UNKNOWNVALUE]], ptr [[PTR]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: [[NEWPTR2:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[PTR]]) +; CHECK-NEXT: ret i8 [[UNKNOWNVALUE]] +; entry: - %ptr = alloca i8 - store i8 42, ptr %ptr, !invariant.group !0 - call void @foo(ptr %ptr) - - %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change -; CHECK: call void @bar(i8 42) - call void @bar(i8 %a) - - %newPtr = call ptr @getPointer(ptr %ptr) - %c = load i8, ptr %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr -; CHECK: call void @bar(i8 %c) - call void @bar(i8 %c) - - %unknownValue = load i8, ptr @unknownPtr + %ptr = alloca i8 + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) + + %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change + call void @bar(i8 %a) + + %newPtr = call ptr @getPointer(ptr %ptr) + %c = load i8, ptr %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr + call void @bar(i8 %c) + + %unknownValue = load i8, ptr @unknownPtr ; FIXME: Can assume that %unknownValue == 42 -; CHECK: store i8 %unknownValue, ptr %ptr, align 1, !invariant.group !0 - store i8 %unknownValue, ptr %ptr, !invariant.group !0 - - %newPtr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) -; CHECK-NOT: load - %d = load i8, ptr %newPtr2, !invariant.group !0 -; CHECK: ret i8 %unknownValue - ret i8 %d + store i8 %unknownValue, ptr %ptr, !invariant.group !0 + + %newPtr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) + %d = load i8, ptr %newPtr2, !invariant.group !0 + ret i8 %d } ; This test checks if invariant.group understands gep with zeros -; CHECK-LABEL: define void @testGEP0() { define void @testGEP0() { +; CHECK-LABEL: define void @testGEP0() { +; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8 +; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]] +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) [[A]]) +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[_Z1GR1A_EXIT:.*]], label %[[BB3:.*]] +; CHECK: [[BB3]]: +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr nonnull [[A]]) +; CHECK-NEXT: br label %[[_Z1GR1A_EXIT]] +; CHECK: [[_Z1GR1A_EXIT]]: +; CHECK-NEXT: ret void +; %a = alloca %struct.A, align 8 store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, 
!invariant.group !0 -; CHECK: call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) %a) call void @_ZN1A3fooEv(ptr nonnull dereferenceable(8) %a) ; This call may change vptr %1 = load i8, ptr @unknownPtr, align 4 %2 = icmp eq i8 %1, 0 @@ -326,7 +442,6 @@ define void @testGEP0() { ; This should be devirtualized by invariant.group %4 = load ptr, ptr %a, align 8, !invariant.group !0 %5 = load ptr, ptr %4, align 8 -; CHECK: call void @_ZN1A3fooEv(ptr nonnull %a) call void %5(ptr nonnull %a) br label %_Z1gR1A.exit @@ -337,51 +452,86 @@ _Z1gR1A.exit: ; preds = %0, %3 ; Check if no optimizations are performed with global pointers. ; FIXME: we could do the optimizations if we would check if dependency comes ; from the same function. -; CHECK-LABEL: define void @testGlobal() { define void @testGlobal() { -; CHECK: %a = load i8, ptr @unknownPtr, align 1, !invariant.group !0 - %a = load i8, ptr @unknownPtr, !invariant.group !0 - call void @foo2(ptr @unknownPtr, i8 %a) -; CHECK: %1 = load i8, ptr @unknownPtr, align 1, !invariant.group !0 - %1 = load i8, ptr @unknownPtr, !invariant.group !0 - call void @bar(i8 %1) - - call void @fooBit(ptr @unknownPtr, i1 1) +; CHECK-LABEL: define void @testGlobal() { +; CHECK-NEXT: [[A:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo2(ptr @unknownPtr, i8 [[A]]) +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @bar(i8 [[TMP1]]) +; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 true) +; CHECK-NEXT: [[TMP2:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP2]]) +; CHECK-NEXT: [[TMP3:%.*]] = load i1, ptr @unknownPtr, align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @fooBit(ptr @unknownPtr, i1 [[TMP3]]) +; CHECK-NEXT: ret void +; + %a = load i8, ptr @unknownPtr, !invariant.group !0 + call void @foo2(ptr @unknownPtr, i8 %a) + %1 = load i8, ptr @unknownPtr, !invariant.group !0 + call void @bar(i8 %1) + + call void @fooBit(ptr @unknownPtr, i1 1) ; Adding regex because of canonicalization of bitcasts -; CHECK: %2 = load i1, ptr {{.*}}, !invariant.group !0 - %2 = load i1, ptr @unknownPtr, !invariant.group !0 - call void @fooBit(ptr @unknownPtr, i1 %2) -; CHECK: %3 = load i1, ptr {{.*}}, !invariant.group !0 - %3 = load i1, ptr @unknownPtr, !invariant.group !0 - call void @fooBit(ptr @unknownPtr, i1 %3) - ret void + %2 = load i1, ptr @unknownPtr, !invariant.group !0 + call void @fooBit(ptr @unknownPtr, i1 %2) + %3 = load i1, ptr @unknownPtr, !invariant.group !0 + call void @fooBit(ptr @unknownPtr, i1 %3) + ret void } ; And in the case it is not global -; CHECK-LABEL: define void @testNotGlobal() { define void @testNotGlobal() { - %a = alloca i8 - call void @foo(ptr %a) -; CHECK: %b = load i8, ptr %a, align 1, !invariant.group !0 - %b = load i8, ptr %a, !invariant.group !0 - call void @foo2(ptr %a, i8 %b) - - %1 = load i8, ptr %a, !invariant.group !0 -; CHECK: call void @bar(i8 %b) - call void @bar(i8 %1) - - call void @fooBit(ptr %a, i1 1) -; CHECK: %1 = trunc i8 %b to i1 - %2 = load i1, ptr %a, !invariant.group !0 -; CHECK-NEXT: call void @fooBit(ptr %a, i1 %1) - call void @fooBit(ptr %a, i1 %2) - %3 = load i1, ptr %a, !invariant.group !0 -; CHECK-NEXT: call void @fooBit(ptr %a, i1 %1) - call void @fooBit(ptr %a, i1 %3) - ret void +; CHECK-LABEL: define void @testNotGlobal() { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: call void @foo(ptr [[A]]) +; 
CHECK-NEXT: [[B:%.*]] = load i8, ptr [[A]], align 1, !invariant.group [[META0]] +; CHECK-NEXT: call void @foo2(ptr [[A]], i8 [[B]]) +; CHECK-NEXT: call void @bar(i8 [[B]]) +; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[B]] to i1 +; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]]) +; CHECK-NEXT: call void @fooBit(ptr [[A]], i1 [[TMP1]]) +; CHECK-NEXT: ret void +; + %a = alloca i8 + call void @foo(ptr %a) + %b = load i8, ptr %a, !invariant.group !0 + call void @foo2(ptr %a, i8 %b) + + %1 = load i8, ptr %a, !invariant.group !0 + call void @bar(i8 %1) + + call void @fooBit(ptr %a, i1 1) + %2 = load i1, ptr %a, !invariant.group !0 + call void @fooBit(ptr %a, i1 %2) + %3 = load i1, ptr %a, !invariant.group !0 + call void @fooBit(ptr %a, i1 %3) + ret void } -; CHECK-LABEL: define void @handling_loops() define void @handling_loops() { +; CHECK-LABEL: define void @handling_loops() { +; CHECK-NEXT: [[A:%.*]] = alloca [[STRUCT_A:%.*]], align 8 +; CHECK-NEXT: store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr [[A]], align 8, !invariant.group [[META0]] +; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @unknownPtr, align 4 +; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[TMP2]], [[DOTLR_PH_I:label %.*]], label %[[_Z2G2R1A_EXIT:.*]] +; CHECK: [[_LR_PH_I:.*:]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[TMP1]], 1 +; CHECK-NEXT: br i1 [[TMP3]], label %[[DOT_CRIT_EDGE_PREHEADER:.*]], label %[[_Z2G2R1A_EXIT]] +; CHECK: [[__CRIT_EDGE_PREHEADER:.*:]] +; CHECK-NEXT: br label %[[DOT_CRIT_EDGE:.*]] +; CHECK: [[__CRIT_EDGE:.*:]] +; CHECK-NEXT: [[TMP4:%.*]] = phi i8 [ [[TMP5:%.*]], %[[DOT_CRIT_EDGE]] ], [ 1, %[[DOT_CRIT_EDGE_PREHEADER]] ] +; CHECK-NEXT: call void @_ZN1A3fooEv(ptr nonnull [[A]]) +; CHECK-NEXT: [[TMP5]] = add nuw nsw i8 [[TMP4]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr @unknownPtr, align 4 +; CHECK-NEXT: [[TMP7:%.*]] = icmp slt i8 [[TMP5]], [[TMP6]] +; CHECK-NEXT: br i1 [[TMP7]], label %[[DOT_CRIT_EDGE]], label %[[_Z2G2R1A_EXIT_LOOPEXIT:.*]] +; CHECK: [[_Z2G2R1A_EXIT_LOOPEXIT]]: +; CHECK-NEXT: br label %[[_Z2G2R1A_EXIT]] +; CHECK: [[_Z2G2R1A_EXIT]]: +; CHECK-NEXT: ret void +; %a = alloca %struct.A, align 8 store ptr getelementptr inbounds ([3 x ptr], ptr @_ZTV1A, i64 0, i64 2), ptr %a, align 8, !invariant.group !0 %1 = load i8, ptr @unknownPtr, align 4 @@ -400,9 +550,7 @@ define void @handling_loops() { %5 = phi i8 [ %7, %._crit_edge ], [ 1, %._crit_edge.preheader ] %.pre = load ptr, ptr %a, align 8, !invariant.group !0 %6 = load ptr, ptr %.pre, align 8 - ; CHECK: call void @_ZN1A3fooEv(ptr nonnull %a) call void %6(ptr nonnull %a) #3 - ; CHECK-NOT: call void % %7 = add nuw nsw i8 %5, 1 %8 = load i8, ptr @unknownPtr, align 4 %9 = icmp slt i8 %7, %8 @@ -432,3 +580,6 @@ declare void @llvm.assume(i1 %cmp.vtables) !0 = !{} +;. +; CHECK: [[META0]] = !{} +;. diff --git a/llvm/test/Transforms/GVN/invariant.start.ll b/llvm/test/Transforms/GVN/invariant.start.ll index f2d7dd0..6f38197 100644 --- a/llvm/test/Transforms/GVN/invariant.start.ll +++ b/llvm/test/Transforms/GVN/invariant.start.ll @@ -1,16 +1,19 @@ -; Test to make sure llvm.invariant.start calls are not treated as clobbers. +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s +; Test to make sure llvm.invariant.start calls are not treated as clobbers. 
declare ptr @llvm.invariant.start.p0(i64, ptr nocapture) nounwind readonly declare void @llvm.invariant.end.p0(ptr, i64, ptr nocapture) nounwind ; We forward store to the load across the invariant.start intrinsic define i8 @forward_store() { -; CHECK-LABEL: @forward_store -; CHECK: call ptr @llvm.invariant.start.p0(i64 1, ptr %a) -; CHECK-NOT: load -; CHECK: ret i8 0 +; CHECK-LABEL: define i8 @forward_store() { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 0, ptr [[A]], align 1 +; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: ret i8 0 +; %a = alloca i8 store i8 0, ptr %a %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a) @@ -23,10 +26,18 @@ declare i8 @dummy(ptr nocapture) nounwind readonly ; We forward store to the load in the non-local analysis case, ; i.e. invariant.start is in another basic block. define i8 @forward_store_nonlocal(i1 %cond) { -; CHECK-LABEL: forward_store_nonlocal -; CHECK: call ptr @llvm.invariant.start.p0(i64 1, ptr %a) -; CHECK: ret i8 0 -; CHECK: ret i8 %val +; CHECK-LABEL: define i8 @forward_store_nonlocal( +; CHECK-SAME: i1 [[COND:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 0, ptr [[A]], align 1 +; CHECK-NEXT: [[I:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: br i1 [[COND]], label %[[LOADBLOCK:.*]], label %[[EXIT:.*]] +; CHECK: [[LOADBLOCK]]: +; CHECK-NEXT: ret i8 0 +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[VAL:%.*]] = call i8 @dummy(ptr [[A]]) +; CHECK-NEXT: ret i8 [[VAL]] +; %a = alloca i8 store i8 0, ptr %a %i = call ptr @llvm.invariant.start.p0(i64 1, ptr %a) @@ -43,12 +54,14 @@ exit: ; We should not value forward %foo to the invariant.end corresponding to %bar. define i8 @forward_store1() { -; CHECK-LABEL: forward_store1 -; CHECK: %foo = call ptr @llvm.invariant.start.p0 -; CHECK-NOT: load -; CHECK: %bar = call ptr @llvm.invariant.start.p0 -; CHECK: call void @llvm.invariant.end.p0(ptr %bar, i64 1, ptr %a) -; CHECK: ret i8 0 +; CHECK-LABEL: define i8 @forward_store1() { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: store i8 0, ptr [[A]], align 1 +; CHECK-NEXT: [[FOO:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: [[BAR:%.*]] = call ptr @llvm.invariant.start.p0(i64 1, ptr [[A]]) +; CHECK-NEXT: call void @llvm.invariant.end.p0(ptr [[BAR]], i64 1, ptr [[A]]) +; CHECK-NEXT: ret i8 0 +; %a = alloca i8 store i8 0, ptr %a %foo = call ptr @llvm.invariant.start.p0(i64 1, ptr %a) diff --git a/llvm/test/Transforms/GVN/load-constant-mem.ll b/llvm/test/Transforms/GVN/load-constant-mem.ll index d5858d6..f5b0d7c 100644 --- a/llvm/test/Transforms/GVN/load-constant-mem.ll +++ b/llvm/test/Transforms/GVN/load-constant-mem.ll @@ -1,19 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn,instcombine -S | FileCheck %s + ; PR4189 @G = external constant [4 x i32] define i32 @test(ptr %p, i32 %i) nounwind { +; CHECK-LABEL: define i32 @test( +; CHECK-SAME: ptr [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: store i8 4, ptr [[P]], align 1 +; CHECK-NEXT: ret i32 0 +; entry: - %P = getelementptr [4 x i32], ptr @G, i32 0, i32 %i - %A = load i32, ptr %P - store i8 4, ptr %p - %B = load i32, ptr %P - %C = sub i32 %A, %B - ret i32 %C + %P = getelementptr [4 x i32], ptr @G, i32 0, i32 %i + %A = load i32, ptr %P + store i8 4, ptr %p + %B = load i32, ptr %P + %C = sub i32 %A, %B + ret i32 %C } - -; 
CHECK: define i32 @test(ptr %p, i32 %i) #0 { -; CHECK-NEXT: entry: -; CHECK-NEXT: store i8 4, ptr %p, align 1 -; CHECK-NEXT: ret i32 0 -; CHECK-NEXT: } diff --git a/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll b/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll index 6ad0f59..c0b20d3 100644 --- a/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll +++ b/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll @@ -1,12 +1,21 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s ; Check that an unreachable predecessor to a PHI node doesn't cause a crash. -; PR21625. - +; PR21625. The first load should be removed, since it's ignored. define i32 @f(ptr %f) { -; CHECK: bb0: -; Load should be removed, since it's ignored. -; CHECK-NEXT: br label +; CHECK-LABEL: define i32 @f( +; CHECK-SAME: ptr [[F:%.*]]) { +; CHECK-NEXT: [[BB0:.*]]: +; CHECK-NEXT: br label %[[BB2:.*]] +; CHECK: [[BB1:.*]]: +; CHECK-NEXT: [[ZED:%.*]] = load ptr, ptr [[F]], align 8 +; CHECK-NEXT: br i1 false, label %[[BB1]], label %[[BB2]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[FOO:%.*]] = phi ptr [ null, %[[BB0]] ], [ [[ZED]], %[[BB1]] ] +; CHECK-NEXT: [[STOREMERGE:%.*]] = load i32, ptr [[FOO]], align 4 +; CHECK-NEXT: ret i32 [[STOREMERGE]] +; bb0: %bar = load ptr, ptr %f br label %bb2 diff --git a/llvm/test/Transforms/GVN/malloc-load-removal.ll b/llvm/test/Transforms/GVN/malloc-load-removal.ll index 0aa4beb..c86990f 100644 --- a/llvm/test/Transforms/GVN/malloc-load-removal.ll +++ b/llvm/test/Transforms/GVN/malloc-load-removal.ll @@ -1,4 +1,6 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=gvn < %s | FileCheck %s + ; PR13694 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" @@ -6,6 +8,17 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 declare noalias ptr @malloc(i64) nounwind allockind("alloc,uninitialized") allocsize(0) define noalias ptr @test1() nounwind uwtable ssp { +; CHECK-LABEL: define noalias ptr @test1( +; CHECK-SAME: ) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @malloc(i64 100) #[[ATTR2:[0-9]+]] +; CHECK-NEXT: br i1 undef, label %[[IF_END:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: ret ptr [[CALL]] +; entry: %call = tail call ptr @malloc(i64 100) nounwind %0 = load i8, ptr %call, align 1 @@ -18,19 +31,22 @@ if.then: ; preds = %entry if.end: ; preds = %if.then, %entry ret ptr %call - -; CHECK-LABEL: @test1( -; CHECK-NOT: load -; CHECK-NOT: icmp - -; CHECK_NO_LIBCALLS-LABEL: @test1( -; CHECK_NO_LIBCALLS: load -; CHECK_NO_LIBCALLS: icmp } declare noalias ptr @_Znwm(i64) nounwind define noalias ptr @test2() nounwind uwtable ssp { +; CHECK-LABEL: define noalias ptr @test2( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @_Znwm(i64 100) #[[ATTR2]] +; CHECK-NEXT: br i1 undef, label %[[IF_END:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: ret ptr [[CALL]] +; entry: %call = tail call ptr @_Znwm(i64 100) nounwind %0 = 
load i8, ptr %call, align 1 @@ -43,19 +59,22 @@ if.then: ; preds = %entry if.end: ; preds = %if.then, %entry ret ptr %call - -; CHECK-LABEL: @test2( -; CHECK-NOT: load -; CHECK-NOT: icmp - -; CHECK_NO_LIBCALLS-LABEL: @test2( -; CHECK_NO_LIBCALLS: load -; CHECK_NO_LIBCALLS: icmp } declare noalias ptr @aligned_alloc(i64 allocalign, i64) nounwind allockind("alloc,uninitialized,aligned") allocsize(1) define noalias ptr @test3() nounwind uwtable ssp { +; CHECK-LABEL: define noalias ptr @test3( +; CHECK-SAME: ) #[[ATTR1]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = tail call ptr @aligned_alloc(i64 256, i64 32) #[[ATTR2]] +; CHECK-NEXT: br i1 undef, label %[[IF_END:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i8 0, ptr [[CALL]], align 1 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: ret ptr [[CALL]] +; entry: %call = tail call ptr @aligned_alloc(i64 256, i64 32) nounwind %0 = load i8, ptr %call, align 32 @@ -68,12 +87,4 @@ if.then: ; preds = %entry if.end: ; preds = %if.then, %entry ret ptr %call - -; CHECK-LABEL: @test3( -; CHECK-NOT: load -; CHECK-NOT: icmp - -; CHECK_NO_LIBCALLS-LABEL: @test3( -; CHECK_NO_LIBCALLS: load -; CHECK_NO_LIBCALLS: icmp } diff --git a/llvm/test/Transforms/GVN/mssa-update-dead-def.ll b/llvm/test/Transforms/GVN/mssa-update-dead-def.ll index ad71a04..1a5b704 100644 --- a/llvm/test/Transforms/GVN/mssa-update-dead-def.ll +++ b/llvm/test/Transforms/GVN/mssa-update-dead-def.ll @@ -1,12 +1,28 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes='require<memoryssa>,gvn' -verify-memoryssa -S %s | FileCheck %s ; This is a regression test for a bug in MemorySSA updater. ; Make sure that we don't crash and end up with a valid MemorySSA. 
-; CHECK: @test() define void @test() personality ptr null { +; CHECK-LABEL: define void @test() personality ptr null { +; CHECK-NEXT: invoke void @bar() +; CHECK-NEXT: to label %[[BAR_NORMAL:.*]] unwind label %[[EXCEPTIONAL:.*]] +; CHECK: [[BAR_NORMAL]]: +; CHECK-NEXT: ret void +; CHECK: [[DEAD_BLOCK:.*:]] +; CHECK-NEXT: invoke void @baz() +; CHECK-NEXT: to label %[[BAZ_NORMAL:.*]] unwind label %[[EXCEPTIONAL]] +; CHECK: [[BAZ_NORMAL]]: +; CHECK-NEXT: ret void +; CHECK: [[EXCEPTIONAL]]: +; CHECK-NEXT: [[TMP9:%.*]] = landingpad { ptr, i32 } +; CHECK-NEXT: cleanup +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: ret void +; invoke void @bar() - to label %bar.normal unwind label %exceptional + to label %bar.normal unwind label %exceptional bar.normal: ret void @@ -16,14 +32,14 @@ dead.block: baz.invoke: invoke void @baz() - to label %baz.normal unwind label %exceptional + to label %baz.normal unwind label %exceptional baz.normal: ret void exceptional: %tmp9 = landingpad { ptr, i32 } - cleanup + cleanup call void @foo() ret void } diff --git a/llvm/test/Transforms/GVN/no-mem-dep-info.ll b/llvm/test/Transforms/GVN/no-mem-dep-info.ll index 0380b7e..5f67902 100644 --- a/llvm/test/Transforms/GVN/no-mem-dep-info.ll +++ b/llvm/test/Transforms/GVN/no-mem-dep-info.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt %s -passes=gvn -S -enable-gvn-memdep=false | FileCheck %s ; RUN: opt %s -passes=gvn -S -enable-gvn-memdep=true | FileCheck %s @@ -11,6 +12,17 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr, <8 x i32>, ; Function Attrs: nounwind define <8 x float> @foo1(ptr noalias readonly %arr.ptr, ptr noalias readonly %vix.ptr, ptr noalias %t2.ptr) #1 { +; CHECK-LABEL: define <8 x float> @foo1( +; CHECK-SAME: ptr noalias readonly [[ARR_PTR:%.*]], ptr noalias readonly [[VIX_PTR:%.*]], ptr noalias [[T2_PTR:%.*]]) { +; CHECK-NEXT: [[ALLOCAS:.*:]] +; CHECK-NEXT: [[VIX:%.*]] = load <8 x i32>, ptr [[VIX_PTR]], align 4 +; CHECK-NEXT: [[T1_PTR:%.*]] = getelementptr i8, ptr [[ARR_PTR]], i8 4 +; CHECK-NEXT: [[V1:%.*]] = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, ptr [[ARR_PTR]], <8 x i32> [[VIX]], <8 x float> splat (float 0xFFFFFFFFE0000000), i8 1) +; CHECK-NEXT: store i8 1, ptr [[T1_PTR]], align 4 +; CHECK-NEXT: [[V2:%.*]] = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, ptr [[ARR_PTR]], <8 x i32> [[VIX]], <8 x float> splat (float 0xFFFFFFFFE0000000), i8 1) +; CHECK-NEXT: [[RES:%.*]] = fadd <8 x float> [[V1]], [[V2]] +; CHECK-NEXT: ret <8 x float> [[RES]] +; allocas: %vix = load <8 x i32>, ptr %vix.ptr, align 4 %t1.ptr = getelementptr i8, ptr %arr.ptr, i8 4 @@ -23,7 +35,3 @@ allocas: ret <8 x float> %res } -; CHECK: foo1 -; CHECK: llvm.x86.avx2.gather.d.ps.256 -; CHECK: store -; CHECK: llvm.x86.avx2.gather.d.ps.256 diff --git a/llvm/test/Transforms/GVN/noalias.ll b/llvm/test/Transforms/GVN/noalias.ll index 98cc930..f28023d 100644 --- a/llvm/test/Transforms/GVN/noalias.ll +++ b/llvm/test/Transforms/GVN/noalias.ll @@ -1,9 +1,13 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s define i32 @test1(ptr %p, ptr %q) { -; CHECK-LABEL: @test1(ptr %p, ptr %q) -; CHECK: load i32, ptr %p, align 4, !noalias ![[SCOPE1:[0-9]+]] -; CHECK: %c = add i32 %a, %a +; CHECK-LABEL: define i32 @test1( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], 
align 4, !noalias [[META0:![0-9]+]] +; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]] +; CHECK-NEXT: ret i32 [[C]] +; %a = load i32, ptr %p, !noalias !3 %b = load i32, ptr %p %c = add i32 %a, %b @@ -11,9 +15,12 @@ define i32 @test1(ptr %p, ptr %q) { } define i32 @test2(ptr %p, ptr %q) { -; CHECK-LABEL: @test2(ptr %p, ptr %q) -; CHECK: load i32, ptr %p, align 4, !alias.scope ![[SCOPE1]] -; CHECK: %c = add i32 %a, %a +; CHECK-LABEL: define i32 @test2( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4, !alias.scope [[META0]] +; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]] +; CHECK-NEXT: ret i32 [[C]] +; %a = load i32, ptr %p, !alias.scope !3 %b = load i32, ptr %p, !alias.scope !3 %c = add i32 %a, %b @@ -21,17 +28,18 @@ define i32 @test2(ptr %p, ptr %q) { } define i32 @test3(ptr %p, ptr %q) { -; CHECK-LABEL: @test3(ptr %p, ptr %q) -; CHECK: load i32, ptr %p, align 4, !alias.scope ![[SCOPE2:[0-9]+]] -; CHECK: %c = add i32 %a, %a +; CHECK-LABEL: define i32 @test3( +; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = load i32, ptr [[P]], align 4, !alias.scope [[META3:![0-9]+]] +; CHECK-NEXT: [[C:%.*]] = add i32 [[A]], [[A]] +; CHECK-NEXT: ret i32 [[C]] +; %a = load i32, ptr %p, !alias.scope !4 %b = load i32, ptr %p, !alias.scope !5 %c = add i32 %a, %b ret i32 %c } -; CHECK: ![[SCOPE1]] = !{!{{[0-9]+}}} -; CHECK: ![[SCOPE2]] = !{!{{[0-9]+}}} declare i32 @foo(ptr) readonly !0 = distinct !{!0, !2, !"callee0: %a"} @@ -41,3 +49,10 @@ declare i32 @foo(ptr) readonly !3 = !{!0} !4 = !{!1} !5 = !{!0, !1} +;. +; CHECK: [[META0]] = !{[[META1:![0-9]+]]} +; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]], !"callee0: %a"} +; CHECK: [[META2]] = distinct !{[[META2]], !"callee0"} +; CHECK: [[META3]] = !{[[META4:![0-9]+]]} +; CHECK: [[META4]] = distinct !{[[META4]], [[META2]], !"callee0: %b"} +;. diff --git a/llvm/test/Transforms/GVN/non-local-offset.ll b/llvm/test/Transforms/GVN/non-local-offset.ll index 0467657..19b571e 100644 --- a/llvm/test/Transforms/GVN/non-local-offset.ll +++ b/llvm/test/Transforms/GVN/non-local-offset.ll @@ -1,16 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s target datalayout = "e-p:64:64:64" ; GVN should ignore the store to p[1] to see that the load from p[0] is ; fully redundant. - -; CHECK-LABEL: @yes( -; CHECK: if.then: -; CHECK-NEXT: store i32 0, ptr %q -; CHECK-NEXT: ret void - define void @yes(i1 %c, ptr %p, ptr %q) nounwind { +; CHECK-LABEL: define void @yes( +; CHECK-SAME: i1 [[C:%.*]], ptr [[P:%.*]], ptr [[Q:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: store i32 0, ptr [[P]], align 4 +; CHECK-NEXT: [[P1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1 +; CHECK-NEXT: store i32 1, ptr [[P1]], align 4 +; CHECK-NEXT: br i1 [[C]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i32 0, ptr [[Q]], align 4 +; CHECK-NEXT: ret void +; CHECK: [[IF_ELSE]]: +; CHECK-NEXT: ret void +; entry: store i32 0, ptr %p %p1 = getelementptr inbounds i32, ptr %p, i64 1 @@ -29,16 +37,22 @@ if.else: ; GVN should ignore the store to p[1] to see that the first load from p[0] is ; fully redundant. However, the second load is larger, so it's not a simple ; redundancy. 
- -; CHECK-LABEL: @watch_out_for_size_change( -; CHECK: if.then: -; CHECK-NEXT: store i32 0, ptr %q -; CHECK-NEXT: ret void -; CHECK: if.else: -; CHECK: load i64, ptr %p -; CHECK: store i64 - define void @watch_out_for_size_change(i1 %c, ptr %p, ptr %q) nounwind { +; CHECK-LABEL: define void @watch_out_for_size_change( +; CHECK-SAME: i1 [[C:%.*]], ptr [[P:%.*]], ptr [[Q:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: store i32 0, ptr [[P]], align 4 +; CHECK-NEXT: [[P1:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 1 +; CHECK-NEXT: store i32 1, ptr [[P1]], align 4 +; CHECK-NEXT: br i1 [[C]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: store i32 0, ptr [[Q]], align 4 +; CHECK-NEXT: ret void +; CHECK: [[IF_ELSE]]: +; CHECK-NEXT: [[T64:%.*]] = load i64, ptr [[P]], align 4 +; CHECK-NEXT: store i64 [[T64]], ptr [[Q]], align 4 +; CHECK-NEXT: ret void +; entry: store i32 0, ptr %p %p1 = getelementptr inbounds i32, ptr %p, i64 1 diff --git a/llvm/test/Transforms/GVN/nonescaping-malloc.ll b/llvm/test/Transforms/GVN/nonescaping-malloc.ll index 76d8cda..f67c958 100644 --- a/llvm/test/Transforms/GVN/nonescaping-malloc.ll +++ b/llvm/test/Transforms/GVN/nonescaping-malloc.ll @@ -1,5 +1,7 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; REQUIRES: asserts ; RUN: opt < %s -passes=gvn -stats -disable-output 2>&1 | FileCheck %s + ; rdar://7363102 ; CHECK: Number of loads deleted @@ -102,3 +104,5 @@ _ZN4llvm9StringMapIPvNS_15MallocAllocatorEE16GetOrCreateValueIS1_EERNS_14StringM } declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/GVN/null-aliases-nothing.ll b/llvm/test/Transforms/GVN/null-aliases-nothing.ll index dc4ff406..81d44ce 100644 --- a/llvm/test/Transforms/GVN/null-aliases-nothing.ll +++ b/llvm/test/Transforms/GVN/null-aliases-nothing.ll @@ -1,19 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s %t = type { i32 } declare void @test1f(ptr) -define void @test1(ptr noalias %stuff ) { - %before = load i32, ptr %stuff - - call void @test1f(ptr null) - - %after = load i32, ptr %stuff ; <--- This should be a dead load - %sum = add i32 %before, %after - - store i32 %sum, ptr %stuff - ret void -; CHECK: load -; CHECK-NOT: load -; CHECK: ret void +; `%stuff` is noalias, `test1f` receives only null, cannot clobber `%stuff`, +; thus the second load is dead. 
+define void @test1(ptr noalias %stuff) { +; CHECK-LABEL: define void @test1( +; CHECK-SAME: ptr noalias [[STUFF:%.*]]) { +; CHECK-NEXT: [[BEFORE:%.*]] = load i32, ptr [[STUFF]], align 4 +; CHECK-NEXT: call void @test1f(ptr null) +; CHECK-NEXT: [[SUM:%.*]] = add i32 [[BEFORE]], [[BEFORE]] +; CHECK-NEXT: store i32 [[SUM]], ptr [[STUFF]], align 4 +; CHECK-NEXT: ret void +; + %before = load i32, ptr %stuff + call void @test1f(ptr null) + %after = load i32, ptr %stuff + %sum = add i32 %before, %after + store i32 %sum, ptr %stuff + ret void } diff --git a/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll b/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll index a102976..358816f 100644 --- a/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll +++ b/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64" @@ -6,12 +7,19 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; not actually redundant around the loop backedge, despite appearances ; if phi-translation is ignored. -; CHECK: define void @test0(ptr %begin) -; CHECK: loop: -; CHECK: %l0 = load i8, ptr %phi -; CHECK: call void @bar(i8 %l0) -; CHECK: %l1 = load i8, ptr %phi define void @test0(ptr %begin) { +; CHECK-LABEL: define void @test0( +; CHECK-SAME: ptr [[BEGIN:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[BEGIN]], %[[ENTRY]] ], [ [[NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[L0:%.*]] = load i8, ptr [[PHI]], align 1 +; CHECK-NEXT: call void @bar(i8 [[L0]]) +; CHECK-NEXT: [[L1:%.*]] = load i8, ptr [[PHI]], align 1 +; CHECK-NEXT: [[NEXT]] = getelementptr inbounds i8, ptr [[PHI]], i8 [[L1]] +; CHECK-NEXT: br label %[[LOOP]] +; entry: br label %loop diff --git a/llvm/test/Transforms/GVN/pr10820.ll b/llvm/test/Transforms/GVN/pr10820.ll index 48b13a4..4b7be9c 100644 --- a/llvm/test/Transforms/GVN/pr10820.ll +++ b/llvm/test/Transforms/GVN/pr10820.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s target datalayout = @@ -7,12 +8,16 @@ target triple = "x86_64-unknown-linux-gnu" @g = external global i31 define void @main() nounwind uwtable { +; CHECK-LABEL: define void @main( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: store i32 402662078, ptr @g, align 8 +; CHECK-NEXT: store i31 402662078, ptr undef, align 1 +; CHECK-NEXT: unreachable +; entry: -; CHECK: store i32 store i32 402662078, ptr @g, align 8 -; CHECK-NOT: load i31 %0 = load i31, ptr @g, align 8 -; CHECK: store i31 store i31 %0, ptr undef, align 1 unreachable } diff --git a/llvm/test/Transforms/GVN/pr12979.ll b/llvm/test/Transforms/GVN/pr12979.ll index 2f7a463..5ff3aa2 100644 --- a/llvm/test/Transforms/GVN/pr12979.ll +++ b/llvm/test/Transforms/GVN/pr12979.ll @@ -1,10 +1,13 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s define i32 @test1(i32 %x, i32 %y) { -; CHECK: @test1(i32 %x, i32 %y) -; CHECK: %add1 = add i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 
@test1( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nsw i32 %x, %y %add2 = add i32 %x, %y %foo = add i32 %add1, %add2 @@ -12,10 +15,12 @@ define i32 @test1(i32 %x, i32 %y) { } define i32 @test2(i32 %x, i32 %y) { -; CHECK: @test2(i32 %x, i32 %y) -; CHECK: %add1 = add i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test2( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nuw i32 %x, %y %add2 = add i32 %x, %y %foo = add i32 %add1, %add2 @@ -23,10 +28,12 @@ define i32 @test2(i32 %x, i32 %y) { } define i32 @test3(i32 %x, i32 %y) { -; CHECK: @test3(i32 %x, i32 %y) -; CHECK: %add1 = add i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test3( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nuw nsw i32 %x, %y %add2 = add i32 %x, %y %foo = add i32 %add1, %add2 @@ -34,10 +41,12 @@ define i32 @test3(i32 %x, i32 %y) { } define i32 @test4(i32 %x, i32 %y) { -; CHECK: @test4(i32 %x, i32 %y) -; CHECK: %add1 = add nsw i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test4( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nsw i32 %x, %y %add2 = add nsw i32 %x, %y %foo = add i32 %add1, %add2 @@ -45,10 +54,12 @@ define i32 @test4(i32 %x, i32 %y) { } define i32 @test5(i32 %x, i32 %y) { -; CHECK: @test5(i32 %x, i32 %y) -; CHECK: %add1 = add i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test5( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nuw i32 %x, %y %add2 = add nsw i32 %x, %y %foo = add i32 %add1, %add2 @@ -56,10 +67,12 @@ define i32 @test5(i32 %x, i32 %y) { } define i32 @test6(i32 %x, i32 %y) { -; CHECK: @test6(i32 %x, i32 %y) -; CHECK: %add1 = add nsw i32 %x, %y -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test6( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add nuw nsw i32 %x, %y %add2 = add nsw i32 %x, %y %foo = add i32 %add1, %add2 @@ -67,11 +80,12 @@ define i32 @test6(i32 %x, i32 %y) { } define i32 @test7(i32 %x, i32 %y) { -; CHECK: @test7(i32 %x, i32 %y) -; CHECK: %add1 = add i32 %x, %y -; CHECK-NOT: what_is_this -; CHECK: %foo = add i32 %add1, %add1 - +; CHECK-LABEL: define i32 @test7( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[X]], [[Y]] +; CHECK-NEXT: [[FOO:%.*]] = add i32 [[ADD1]], [[ADD1]] +; CHECK-NEXT: ret i32 [[FOO]] +; %add1 = add i32 %x, %y, !what_is_this !{} %add2 = add i32 %x, %y %foo = add i32 %add1, %add2 @@ -81,11 +95,12 @@ define i32 @test7(i32 %x, i32 %y) { declare void @mumble(i2, i2) define void @test8(i2 %x) { -; CHECK-LABEL: @test8( -; CHECK: %[[ashr:.*]] = ashr i2 %x, 1 -; CHECK-NEXT: call 
void @mumble(i2 %[[ashr]], i2 %[[ashr]]) -; CHECK-NEXT: ret void - +; CHECK-LABEL: define void @test8( +; CHECK-SAME: i2 [[X:%.*]]) { +; CHECK-NEXT: [[ASHR0:%.*]] = ashr i2 [[X]], 1 +; CHECK-NEXT: call void @mumble(i2 [[ASHR0]], i2 [[ASHR0]]) +; CHECK-NEXT: ret void +; %ashr0 = ashr exact i2 %x, 1 %ashr1 = ashr i2 %x, 1 call void @mumble(i2 %ashr0, i2 %ashr1) diff --git a/llvm/test/Transforms/GVN/pr17732.ll b/llvm/test/Transforms/GVN/pr17732.ll index c6ebd7a..29c7931c 100644 --- a/llvm/test/Transforms/GVN/pr17732.ll +++ b/llvm/test/Transforms/GVN/pr17732.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S -o - < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" @@ -13,6 +14,12 @@ target triple = "x86_64-unknown-linux-gnu" @vector_with_zeroinit = common global %struct.with_vector zeroinitializer, align 4 define i32 @main() { +; CHECK-LABEL: define i32 @main() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false) +; CHECK-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @vector_with_zeroinit, ptr align 4 @main.obj_with_vector, i64 12, i1 false) +; CHECK-NEXT: ret i32 1 +; entry: tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @array_with_zeroinit, ptr align 4 @main.obj_with_array, i64 12, i1 false) %0 = load i8, ptr getelementptr inbounds (%struct.with_array, ptr @array_with_zeroinit, i64 0, i32 2), align 4 @@ -23,8 +30,6 @@ entry: %conv1 = sext i8 %1 to i32 %and = and i32 %conv0, %conv1 ret i32 %and -; CHECK-LABEL: define i32 @main( -; CHECK: ret i32 1 } declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) diff --git a/llvm/test/Transforms/GVN/pr17852.ll b/llvm/test/Transforms/GVN/pr17852.ll index 731cbc6..c464cf2 100644 --- a/llvm/test/Transforms/GVN/pr17852.ll +++ b/llvm/test/Transforms/GVN/pr17852.ll @@ -1,7 +1,69 @@ -; RUN: opt < %s -passes=gvn +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S -o - < %s | FileCheck %s + target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" %struct.S0 = type { [2 x i8], [2 x i8], [4 x i8], [2 x i8], i32, i32, i32, i32 } + define void @fn1(ptr byval(%struct.S0) align 8 %p1) { +; CHECK-LABEL: define void @fn1( +; CHECK-SAME: ptr byval([[STRUCT_S0:%.*]]) align 8 [[P1:%.*]]) { +; CHECK-NEXT: br label %[[FOR_COND:.*]] +; CHECK: [[FOR_COND]]: +; CHECK-NEXT: br i1 true, label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]] +; CHECK: [[BB1:.*:]] +; CHECK-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 2 +; CHECK-NEXT: [[F9:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 7 +; CHECK-NEXT: br label %[[FOR_COND]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[F22:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 2 +; CHECK-NEXT: [[F7:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 5 +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[F7]], align 8 +; CHECK-NEXT: br label %[[IF_END40:.*]] +; CHECK: [[IF_ELSE]]: +; CHECK-NEXT: br i1 false, label %[[FOR_COND18:.*]], label %[[IF_THEN6:.*]] +; CHECK: [[IF_THEN6]]: +; CHECK-NEXT: [[F3:%.*]] = 
getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 2 +; CHECK-NEXT: [[F5:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: br label %[[IF_END36:.*]] +; CHECK: [[FOR_COND18]]: +; CHECK-NEXT: call void @fn4() +; CHECK-NEXT: br i1 true, label %[[IF_END:.*]], label %[[FOR_COND18_IF_END36_CRIT_EDGE:.*]] +; CHECK: [[FOR_COND18_IF_END36_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[IF_END36]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[F321:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 2 +; CHECK-NEXT: [[F925:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 7 +; CHECK-NEXT: [[F526:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: [[BF_LOAD27:%.*]] = load i16, ptr [[F526]], align 8 +; CHECK-NEXT: br label %[[IF_END36]] +; CHECK: [[IF_END36]]: +; CHECK-NEXT: [[F537:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: [[BF_LOAD38:%.*]] = load i16, ptr [[F537]], align 8 +; CHECK-NEXT: [[BF_CLEAR39:%.*]] = and i16 [[BF_LOAD38]], -16384 +; CHECK-NEXT: br label %[[IF_END40]] +; CHECK: [[IF_END40]]: +; CHECK-NEXT: [[BF_LOAD522:%.*]] = phi i16 [ [[BF_LOAD38]], %[[IF_END36]] ], [ poison, %[[IF_THEN]] ] +; CHECK-NEXT: [[F6:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 4 +; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[F6]], align 4 +; CHECK-NEXT: call void @fn2(i32 [[TMP18]]) +; CHECK-NEXT: [[F8:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 6 +; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[F8]], align 4 +; CHECK-NEXT: [[TOBOOL41:%.*]] = icmp eq i32 [[TMP19]], 0 +; CHECK-NEXT: br i1 true, label %[[IF_END40_IF_END50_CRIT_EDGE:.*]], label %[[IF_THEN42:.*]] +; CHECK: [[IF_END40_IF_END50_CRIT_EDGE]]: +; CHECK-NEXT: [[F551_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: [[BF_LOAD52_PRE:%.*]] = load i16, ptr [[F551_PHI_TRANS_INSERT]], align 8 +; CHECK-NEXT: br label %[[IF_END50:.*]] +; CHECK: [[IF_THEN42]]: +; CHECK-NEXT: [[F547:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: [[BF_LOAD48:%.*]] = load i16, ptr [[F547]], align 8 +; CHECK-NEXT: br label %[[IF_END50]] +; CHECK: [[IF_END50]]: +; CHECK-NEXT: [[BF_LOAD52:%.*]] = phi i16 [ [[BF_LOAD52_PRE]], %[[IF_END40_IF_END50_CRIT_EDGE]] ], [ [[BF_LOAD522]], %[[IF_THEN42]] ] +; CHECK-NEXT: [[F551:%.*]] = getelementptr inbounds [[STRUCT_S0]], ptr [[P1]], i64 0, i32 3 +; CHECK-NEXT: [[BF_CLEAR53:%.*]] = and i16 [[BF_LOAD52]], -16384 +; CHECK-NEXT: ret void +; br label %for.cond for.cond: ; preds = %1, %0 br label %for.end diff --git a/llvm/test/Transforms/GVN/pr24397.ll b/llvm/test/Transforms/GVN/pr24397.ll index 8ef9360..a663350 100644 --- a/llvm/test/Transforms/GVN/pr24397.ll +++ b/llvm/test/Transforms/GVN/pr24397.ll @@ -1,8 +1,21 @@ -; RUN: opt -passes=gvn -disable-output < %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S -o - < %s | FileCheck %s target triple = "x86_64-unknown-linux-gnu" define i64 @foo(ptr %arrayidx) { +; CHECK-LABEL: define i64 @foo( +; CHECK-SAME: ptr [[ARRAYIDX:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[P:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 +; CHECK-NEXT: [[CMPNULL:%.*]] = icmp eq ptr [[P]], null +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64 +; CHECK-NEXT: br label %[[BB2:.*]] +; CHECK: [[ENTRY2:.*:]] +; CHECK-NEXT: br label %[[BB2]] +; CHECK: [[BB2]]: +; 
CHECK-NEXT: ret i64 [[TMP0]] +; entry: %p = load ptr, ptr %arrayidx, align 8 %cmpnull = icmp eq ptr %p, null diff --git a/llvm/test/Transforms/GVN/pr24426.ll b/llvm/test/Transforms/GVN/pr24426.ll index 2a08857..d296e15a0 100644 --- a/llvm/test/Transforms/GVN/pr24426.ll +++ b/llvm/test/Transforms/GVN/pr24426.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=memcpyopt,mldst-motion,gvn -S | FileCheck %s declare void @check(i8) @@ -5,13 +6,17 @@ declare void @check(i8) declare void @write(ptr %res) define void @test1() { +; CHECK-LABEL: define void @test1() { +; CHECK-NEXT: [[TMP1:%.*]] = alloca [10 x i8], align 1 +; CHECK-NEXT: call void @write(ptr [[TMP1]]) +; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +; CHECK-NEXT: call void @check(i8 [[TMP2]]) +; CHECK-NEXT: ret void +; %1 = alloca [10 x i8] call void @write(ptr %1) %2 = load i8, ptr %1 - -; CHECK-NOT: undef call void @check(i8 %2) - ret void } diff --git a/llvm/test/Transforms/GVN/pr25440.ll b/llvm/test/Transforms/GVN/pr25440.ll index 507111ef..046775e 100644 --- a/llvm/test/Transforms/GVN/pr25440.ll +++ b/llvm/test/Transforms/GVN/pr25440.ll @@ -1,4 +1,5 @@ -;RUN: opt -passes=gvn -S < %s | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S < %s | FileCheck %s target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n8:16:32-S64" target triple = "thumbv7--linux-gnueabi" @@ -10,19 +11,53 @@ target triple = "thumbv7--linux-gnueabi" ; Function Attrs: nounwind define fastcc void @foo(ptr nocapture readonly %x) { -;CHECK-LABEL: foo +; CHECK-LABEL: define fastcc void @foo( +; CHECK-SAME: ptr readonly captures(none) [[X:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[BB0:.*]] +; CHECK: [[BB0]]: +; CHECK-NEXT: [[X_TR:%.*]] = phi ptr [ [[X]], %[[ENTRY]] ], [ null, %[[LAND_LHS_TRUE:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[X_TR]], align 4 +; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[TMP0]] to i32 +; CHECK-NEXT: switch i32 [[CONV]], label %[[IF_END_50:.*]] [ +; CHECK-NEXT: i32 43, label %[[CLEANUP:.*]] +; CHECK-NEXT: i32 52, label %[[IF_THEN_5:.*]] +; CHECK-NEXT: ] +; CHECK: [[IF_THEN_5]]: +; CHECK-NEXT: br i1 undef, label %[[LAND_LHS_TRUE]], label %[[IF_THEN_26:.*]] +; CHECK: [[LAND_LHS_TRUE]]: +; CHECK-NEXT: br i1 undef, label %[[CLEANUP]], label %[[BB0]] +; CHECK: [[IF_THEN_26]]: +; CHECK-NEXT: br i1 undef, label %[[COND_END:.*]], label %[[COND_FALSE:.*]] +; CHECK: [[COND_FALSE]]: +; CHECK-NEXT: [[MODE:%.*]] = getelementptr inbounds [[STRUCT_A:%.*]], ptr [[X_TR]], i32 0, i32 1 +; CHECK-NEXT: [[BF_LOAD:%.*]] = load i16, ptr [[MODE]], align 2 +; CHECK-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 8 +; CHECK-NEXT: br label %[[COND_END]] +; CHECK: [[COND_END]]: +; CHECK-NEXT: br i1 undef, label %[[IF_THEN_44:.*]], label %[[CLEANUP]] +; CHECK: [[IF_THEN_44]]: +; CHECK-NEXT: unreachable +; CHECK: [[IF_END_50]]: +; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds [0 x i32], ptr @length, i32 0, i32 [[CONV]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX52]], align 4 +; CHECK-NEXT: br i1 undef, label %[[FOR_BODY_57:.*]], label %[[CLEANUP]] +; CHECK: [[FOR_BODY_57]]: +; CHECK-NEXT: [[I_2157:%.*]] = add nsw i32 [[TMP1]], -1 +; CHECK-NEXT: unreachable +; CHECK: [[CLEANUP]]: +; CHECK-NEXT: ret void +; entry: br label %bb0 bb0: ; preds = %land.lhs.true, %entry -;CHECK: bb0: %x.tr = phi ptr [ %x, %entry ], [ null, %land.lhs.true ] %0 
= load i16, ptr %x.tr, align 4 -; CHECK: load i16, ptr %conv = zext i16 %0 to i32 switch i32 %conv, label %if.end.50 [ - i32 43, label %cleanup - i32 52, label %if.then.5 + i32 43, label %cleanup + i32 52, label %if.then.5 ] if.then.5: ; preds = %bb0 @@ -36,8 +71,6 @@ if.then.26: ; preds = %if.then.5 br i1 undef, label %cond.end, label %cond.false cond.false: ; preds = %if.then.26 -; CHECK: cond.false: -; CHECK: load i16 %mode = getelementptr inbounds %struct.a, ptr %x.tr.lcssa163, i32 0, i32 1 %bf.load = load i16, ptr %mode, align 2 %bf.shl = shl i16 %bf.load, 8 @@ -50,7 +83,6 @@ if.then.44: ; preds = %cond.end unreachable if.end.50: ; preds = %bb0 -;%CHECK: if.end.50: %conv.lcssa = phi i32 [ %conv, %bb0 ] %arrayidx52 = getelementptr inbounds [0 x i32], ptr @length, i32 0, i32 %conv.lcssa %1 = load i32, ptr %arrayidx52, align 4 @@ -68,7 +100,38 @@ cleanup: ; preds = %if.end.50, %cond.en @dfg_text = external global ptr, align 4 define void @dfg_lex() { -;CHECK-LABEL: dfg_lex +; CHECK-LABEL: define void @dfg_lex() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br label %[[WHILE_BODYTHREAD_PRE_SPLIT:.*]] +; CHECK: [[WHILE_BODYTHREAD_PRE_SPLIT]]: +; CHECK-NEXT: br i1 undef, label %[[WHILE_BODYTHREAD_PRE_SPLIT_IF_THEN_14_CRIT_EDGE:.*]], label %[[IF_END_15:.*]] +; CHECK: [[WHILE_BODYTHREAD_PRE_SPLIT_IF_THEN_14_CRIT_EDGE]]: +; CHECK-NEXT: [[V1_PRE:%.*]] = load i32, ptr @dfg_text, align 4 +; CHECK-NEXT: br label %[[IF_THEN_14:.*]] +; CHECK: [[IF_THEN_14]]: +; CHECK-NEXT: [[V1:%.*]] = phi i32 [ [[V1_PRE]], %[[WHILE_BODYTHREAD_PRE_SPLIT_IF_THEN_14_CRIT_EDGE]] ], [ [[SUB_PTR_RHS_CAST25:%.*]], %[[WHILE_END:.*]] ] +; CHECK-NEXT: br label %[[IF_END_15]] +; CHECK: [[IF_END_15]]: +; CHECK-NEXT: [[V2:%.*]] = load ptr, ptr @yy_c_buf_p, align 4 +; CHECK-NEXT: br label %[[WHILE_COND_16:.*]] +; CHECK: [[WHILE_COND_16]]: +; CHECK-NEXT: br i1 undef, label %[[WHILE_COND_16]], label %[[WHILE_END]] +; CHECK: [[WHILE_END]]: +; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[V2]], i32 undef +; CHECK-NEXT: store ptr [[ADD_PTR]], ptr @dfg_text, align 4 +; CHECK-NEXT: [[SUB_PTR_RHS_CAST25]] = ptrtoint ptr [[ADD_PTR]] to i32 +; CHECK-NEXT: [[SUB_PTR_SUB26:%.*]] = sub i32 0, [[SUB_PTR_RHS_CAST25]] +; CHECK-NEXT: switch i32 undef, label %[[SW_DEFAULT:.*]] [ +; CHECK-NEXT: i32 65, label %[[WHILE_BODYTHREAD_PRE_SPLIT]] +; CHECK-NEXT: i32 3, label %[[RETURN:.*]] +; CHECK-NEXT: i32 57, label %[[WHILE_BODYTHREAD_PRE_SPLIT]] +; CHECK-NEXT: i32 60, label %[[IF_THEN_14]] +; CHECK-NEXT: ] +; CHECK: [[SW_DEFAULT]]: +; CHECK-NEXT: unreachable +; CHECK: [[RETURN]]: +; CHECK-NEXT: ret void +; entry: br label %while.bodythread-pre-split @@ -93,10 +156,10 @@ while.end: ; preds = %while.cond.16 %sub.ptr.rhs.cast25 = ptrtoint ptr %add.ptr to i32 %sub.ptr.sub26 = sub i32 0, %sub.ptr.rhs.cast25 switch i32 undef, label %sw.default [ - i32 65, label %while.bodythread-pre-split - i32 3, label %return - i32 57, label %while.bodythread-pre-split - i32 60, label %if.then.14 + i32 65, label %while.bodythread-pre-split + i32 3, label %return + i32 57, label %while.bodythread-pre-split + i32 60, label %if.then.14 ] sw.default: ; preds = %while.end diff --git a/llvm/test/Transforms/GVN/pr28562.ll b/llvm/test/Transforms/GVN/pr28562.ll index 338200a..02301dc 100644 --- a/llvm/test/Transforms/GVN/pr28562.ll +++ b/llvm/test/Transforms/GVN/pr28562.ll @@ -1,9 +1,13 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=gvn < %s | FileCheck %s + define ptr @test1(ptr %a) { +; 
CHECK-LABEL: define ptr @test1( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[X1:%.*]] = getelementptr i32, ptr [[A]], i32 10 +; CHECK-NEXT: ret ptr [[X1]] +; %x1 = getelementptr inbounds i32, ptr %a, i32 10 %x2 = getelementptr i32, ptr %a, i32 10 ret ptr %x2 -; CHECK-LABEL: @test1( -; CHECK: %[[x:.*]] = getelementptr i32, ptr %a, i32 10 -; CHECK: ret ptr %[[x]] } diff --git a/llvm/test/Transforms/GVN/pr28879.ll b/llvm/test/Transforms/GVN/pr28879.ll index 0c9231d..b961a55 100644 --- a/llvm/test/Transforms/GVN/pr28879.ll +++ b/llvm/test/Transforms/GVN/pr28879.ll @@ -1,12 +1,22 @@ -; RUN: opt -passes=gvn <%s -S -o - | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn < %s -S -o - | FileCheck %s define void @f() { +; CHECK-LABEL: define void @f() { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A:%.*]] = alloca <7 x i1>, align 2 +; CHECK-NEXT: store <7 x i1> undef, ptr [[A]], align 2 +; CHECK-NEXT: [[VAL:%.*]] = load i1, ptr [[A]], align 2 +; CHECK-NEXT: br i1 [[VAL]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +; CHECK: [[COND_TRUE]]: +; CHECK-NEXT: ret void +; CHECK: [[COND_FALSE]]: +; CHECK-NEXT: ret void +; entry: %a = alloca <7 x i1>, align 2 store <7 x i1> undef, ptr %a, align 2 -; CHECK: store <7 x i1> undef, ptr %val = load i1, ptr %a, align 2 -; CHECK: load i1, ptr br i1 %val, label %cond.true, label %cond.false cond.true: @@ -17,11 +27,20 @@ cond.false: } define <7 x i1> @g(ptr %a) { +; CHECK-LABEL: define <7 x i1> @g( +; CHECK-SAME: ptr [[A:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[VEC:%.*]] = load <7 x i1>, ptr [[A]], align 1 +; CHECK-NEXT: [[VAL:%.*]] = load i1, ptr [[A]], align 2 +; CHECK-NEXT: br i1 [[VAL]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +; CHECK: [[COND_TRUE]]: +; CHECK-NEXT: ret <7 x i1> [[VEC]] +; CHECK: [[COND_FALSE]]: +; CHECK-NEXT: ret <7 x i1> zeroinitializer +; entry: %vec = load <7 x i1>, ptr %a -; CHECK: load <7 x i1>, ptr %val = load i1, ptr %a, align 2 -; CHECK: load i1, ptr br i1 %val, label %cond.true, label %cond.false cond.true: diff --git a/llvm/test/Transforms/GVN/pr36063.ll b/llvm/test/Transforms/GVN/pr36063.ll index 5ac4c3d..8aaeff6 100644 --- a/llvm/test/Transforms/GVN/pr36063.ll +++ b/llvm/test/Transforms/GVN/pr36063.ll @@ -1,6 +1,20 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=memcpyopt,mldst-motion,gvn -S | FileCheck %s define void @foo(ptr %ret, i1 %x) { +; CHECK-LABEL: define void @foo( +; CHECK-SAME: ptr [[RET:%.*]], i1 [[X:%.*]]) { +; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1 +; CHECK-NEXT: br i1 [[X]], label %[[YES:.*]], label %[[NO:.*]] +; CHECK: [[YES]]: +; CHECK-NEXT: br label %[[OUT:.*]] +; CHECK: [[NO]]: +; CHECK-NEXT: br label %[[OUT]] +; CHECK: [[OUT]]: +; CHECK-NEXT: store i8 5, ptr [[A]], align 1 +; CHECK-NEXT: store i8 5, ptr [[RET]], align 1 +; CHECK-NEXT: ret void +; %a = alloca i8 br i1 %x, label %yes, label %no @@ -14,7 +28,6 @@ no: ; preds = %0 out: ; preds = %no, %yes %tmp = load i8, ptr %a -; CHECK-NOT: undef store i8 %tmp, ptr %ret ret void } diff --git a/llvm/test/Transforms/GVN/pr42605.ll b/llvm/test/Transforms/GVN/pr42605.ll index f0ff6d9..3e6241c 100644 --- a/llvm/test/Transforms/GVN/pr42605.ll +++ b/llvm/test/Transforms/GVN/pr42605.ll @@ -1,6 +1,9 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn %s -S | FileCheck %s + ; PR42605. 
Check phi-translate won't translate the value number of a call ; to the value of another call with clobber in between. + target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" @@ -12,6 +15,13 @@ declare dso_local i32 @printf(ptr nocapture readonly, ...) local_unnamed_addr ; Function Attrs: noinline norecurse nounwind readonly uwtable define dso_local i32 @_Z3gooi(i32 %i) local_unnamed_addr #0 { +; CHECK-LABEL: define dso_local i32 @_Z3gooi( +; CHECK-SAME: i32 [[I:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[T0:%.*]] = load i32, ptr @global, align 4, !tbaa [[INT_TBAA2:![0-9]+]] +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T0]], [[I]] +; CHECK-NEXT: ret i32 [[ADD]] +; entry: %t0 = load i32, ptr @global, align 4, !tbaa !2 %add = add nsw i32 %t0, %i @@ -20,6 +30,24 @@ entry: ; Function Attrs: nofree nounwind uwtable define dso_local void @noclobber() local_unnamed_addr { +; CHECK-LABEL: define dso_local void @noclobber() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @_Z3gooi(i32 2) +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], 5 +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD]], 2 +; CHECK-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[CALL1:%.*]] = tail call i32 @_Z3gooi(i32 3) +; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[CALL1]], 5 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[ADD4_PRE_PHI:%.*]] = phi i32 [ [[ADD2]], %[[IF_THEN]] ], [ [[ADD]], %[[ENTRY]] ] +; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 3, %[[IF_THEN]] ], [ 2, %[[ENTRY]] ] +; CHECK-NEXT: [[GLOBAL2_0:%.*]] = phi i32 [ [[ADD2]], %[[IF_THEN]] ], [ [[ADD]], %[[ENTRY]] ] +; CHECK-NEXT: [[CALL3:%.*]] = tail call i32 @_Z3gooi(i32 [[I_0]]) +; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 (ptr, ...) @printf(ptr @.str, i32 [[GLOBAL2_0]], i32 [[ADD4_PRE_PHI]]) +; CHECK-NEXT: ret void +; entry: %call = tail call i32 @_Z3gooi(i32 2) %add = add nsw i32 %call, 5 @@ -32,9 +60,6 @@ if.then: ; preds = %entry br label %if.end ; Check pre happens after phitranslate. -; CHECK-LABEL: @noclobber -; CHECK: %add4.pre-phi = phi i32 [ %add2, %if.then ], [ %add, %entry ] -; CHECK: printf(ptr @.str, i32 %global2.0, i32 %add4.pre-phi) if.end: ; preds = %if.then, %entry %i.0 = phi i32 [ 3, %if.then ], [ 2, %entry ] @@ -47,6 +72,25 @@ if.end: ; preds = %if.then, %entry ; Function Attrs: nofree nounwind uwtable define dso_local void @hasclobber() local_unnamed_addr { +; CHECK-LABEL: define dso_local void @hasclobber() local_unnamed_addr { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @_Z3gooi(i32 2) +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CALL]], 5 +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD]], 2 +; CHECK-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[CALL1:%.*]] = tail call i32 @_Z3gooi(i32 3) +; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[CALL1]], 5 +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 3, %[[IF_THEN]] ], [ 2, %[[ENTRY]] ] +; CHECK-NEXT: [[GLOBAL2_0:%.*]] = phi i32 [ [[ADD2]], %[[IF_THEN]] ], [ [[ADD]], %[[ENTRY]] ] +; CHECK-NEXT: store i32 5, ptr @global, align 4, !tbaa [[INT_TBAA2]] +; CHECK-NEXT: [[CALL3:%.*]] = tail call i32 @_Z3gooi(i32 [[I_0]]) +; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[CALL3]], 5 +; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 (ptr, ...) 
@printf(ptr @.str, i32 [[GLOBAL2_0]], i32 [[ADD4]]) +; CHECK-NEXT: ret void +; entry: %call = tail call i32 @_Z3gooi(i32 2) %add = add nsw i32 %call, 5 @@ -59,10 +103,6 @@ if.then: ; preds = %entry br label %if.end ; Check no pre happens. -; CHECK-LABEL: @hasclobber -; CHECK: %call3 = tail call i32 @_Z3gooi(i32 %i.0) -; CHECK-NEXT: %add4 = add nsw i32 %call3, 5 -; CHECK-NEXT: printf(ptr @.str, i32 %global2.0, i32 %add4) if.end: ; preds = %if.then, %entry %i.0 = phi i32 [ 3, %if.then ], [ 2, %entry ] @@ -85,3 +125,9 @@ attributes #0 = { noinline norecurse nounwind readonly uwtable "correctly-rounde !3 = !{!"int", !4, i64 0} !4 = !{!"omnipotent char", !5, i64 0} !5 = !{!"Simple C++ TBAA"} +;. +; CHECK: [[INT_TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0} +; CHECK: [[META3]] = !{!"int", [[META4:![0-9]+]], i64 0} +; CHECK: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0} +; CHECK: [[META5]] = !{!"Simple C++ TBAA"} +;. diff --git a/llvm/test/Transforms/GVN/pr49193.ll b/llvm/test/Transforms/GVN/pr49193.ll index 9ee9f26..52703ee 100644 --- a/llvm/test/Transforms/GVN/pr49193.ll +++ b/llvm/test/Transforms/GVN/pr49193.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s @a = external local_unnamed_addr global i32, align 4 @@ -6,9 +7,37 @@ ; Function Attrs: nounwind readnone declare ptr @j() local_unnamed_addr #0 -; CHECK: define {{.*}}@k() - define i64 @k() local_unnamed_addr { +; CHECK-LABEL: define i64 @k() local_unnamed_addr { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: br i1 undef, label %[[BB10_PREHEADER:.*]], label %[[BB3:.*]] +; CHECK: [[BB10_PREHEADER]]: +; CHECK-NEXT: br label %[[BB13:.*]] +; CHECK: [[BB3]]: +; CHECK-NEXT: [[I4:%.*]] = load i32, ptr @a, align 4 +; CHECK-NEXT: [[I5_NOT:%.*]] = icmp eq i32 [[I4]], 0 +; CHECK-NEXT: [[I8:%.*]] = tail call ptr @j() +; CHECK-NEXT: br label %[[BB37:.*]] +; CHECK: [[BB13]]: +; CHECK-NEXT: br i1 undef, label %[[BB30THREAD_PRE_SPLIT:.*]], label %[[BB16:.*]] +; CHECK: [[BB16]]: +; CHECK-NEXT: [[I17:%.*]] = tail call ptr @j() +; CHECK-NEXT: br i1 undef, label %[[BB22THREAD_PRE_SPLIT:.*]], label %[[BB37_LOOPEXIT:.*]] +; CHECK: [[BB22THREAD_PRE_SPLIT]]: +; CHECK-NEXT: br i1 undef, label %[[BB30THREAD_PRE_SPLIT]], label %[[BB37_LOOPEXIT]] +; CHECK: [[BB30THREAD_PRE_SPLIT]]: +; CHECK-NEXT: [[I31_PR:%.*]] = load i32, ptr @a, align 4 +; CHECK-NEXT: [[I32_NOT2:%.*]] = icmp eq i32 [[I31_PR]], 0 +; CHECK-NEXT: br i1 undef, label %[[BB37_LOOPEXIT]], label %[[BB13]] +; CHECK: [[BB37_LOOPEXIT]]: +; CHECK-NEXT: [[I38_PRE:%.*]] = load i32, ptr @a, align 4 +; CHECK-NEXT: br label %[[BB37]] +; CHECK: [[BB37]]: +; CHECK-NEXT: [[I38:%.*]] = phi i32 [ [[I38_PRE]], %[[BB37_LOOPEXIT]] ], [ [[I4]], %[[BB3]] ] +; CHECK-NEXT: store i32 [[I38]], ptr @b, align 4 +; CHECK-NEXT: [[I39:%.*]] = tail call ptr @j() +; CHECK-NEXT: unreachable +; bb: br i1 undef, label %bb10.preheader, label %bb3 diff --git a/llvm/test/Transforms/GVN/pre-new-inst.ll b/llvm/test/Transforms/GVN/pre-new-inst.ll index 8e8cea0..0af8ad2 100644 --- a/llvm/test/Transforms/GVN/pre-new-inst.ll +++ b/llvm/test/Transforms/GVN/pre-new-inst.ll @@ -1,7 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S %s | FileCheck %s %MyStruct = type { i32, i32 } + define i8 @foo(i64 %in, ptr %arr, i1 %arg) { +; CHECK-LABEL: define i8 @foo( +; CHECK-SAME: i64 [[IN:%.*]], ptr [[ARR:%.*]], i1 [[ARG:%.*]]) { +; CHECK-NEXT: [[ADDR:%.*]] = 
alloca [[MYSTRUCT:%.*]], align 8 +; CHECK-NEXT: [[DEAD:%.*]] = trunc i64 [[IN]] to i32 +; CHECK-NEXT: br i1 [[ARG]], label %[[NEXT:.*]], label %[[TMP:.*]] +; CHECK: [[TMP]]: +; CHECK-NEXT: call void @bar() +; CHECK-NEXT: br label %[[NEXT]] +; CHECK: [[NEXT]]: +; CHECK-NEXT: store i64 [[IN]], ptr [[ADDR]], align 4 +; CHECK-NEXT: [[RESPTR:%.*]] = getelementptr i8, ptr [[ARR]], i32 [[DEAD]] +; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[RESPTR]], align 1 +; CHECK-NEXT: ret i8 [[RES]] +; %addr = alloca %MyStruct %dead = trunc i64 %in to i32 br i1 %arg, label %next, label %tmp @@ -16,11 +32,8 @@ next: final: %idx32 = load i32, ptr %addr - -; CHECK: %resptr = getelementptr i8, ptr %arr, i32 %dead %resptr = getelementptr i8, ptr %arr, i32 %idx32 %res = load i8, ptr %resptr - ret i8 %res } diff --git a/llvm/test/Transforms/GVN/propagate-ir-flags.ll b/llvm/test/Transforms/GVN/propagate-ir-flags.ll index 6f4e662..6b11ff5 100644 --- a/llvm/test/Transforms/GVN/propagate-ir-flags.ll +++ b/llvm/test/Transforms/GVN/propagate-ir-flags.ll @@ -1,11 +1,15 @@ - +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s -; CHECK-LABEL: func_fast -; CHECK: fadd fast double -; CHECK-NEXT: store -; CHECK-NEXT: ret define double @func_fast(double %a, double %b) { +; CHECK-LABEL: define double @func_fast( +; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A_ADDR:%.*]] = alloca double, align 8 +; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[B]], 3.000000e+00 +; CHECK-NEXT: store double [[ADD]], ptr [[A_ADDR]], align 8 +; CHECK-NEXT: ret double [[ADD]] +; entry: %a.addr = alloca double, align 8 %add = fadd fast double %b, 3.000000e+00 @@ -14,11 +18,15 @@ entry: ret double %load_add } -; CHECK-LABEL: func_no_fast -; CHECK: fadd double -; CHECK-NEXT: store -; CHECK-NEXT: ret define double @func_no_fast(double %a, double %b) { +; CHECK-LABEL: define double @func_no_fast( +; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[A_ADDR:%.*]] = alloca double, align 8 +; CHECK-NEXT: [[ADD:%.*]] = fadd double [[B]], 3.000000e+00 +; CHECK-NEXT: store double [[ADD]], ptr [[A_ADDR]], align 8 +; CHECK-NEXT: ret double [[ADD]] +; entry: %a.addr = alloca double, align 8 %add = fadd fast double %b, 3.000000e+00 @@ -26,4 +34,3 @@ entry: %duplicated_add = fadd double %b, 3.000000e+00 ret double %duplicated_add } - diff --git a/llvm/test/Transforms/GVN/rle-no-phi-translate.ll b/llvm/test/Transforms/GVN/rle-no-phi-translate.ll index 8876665..5b8b4db 100644 --- a/llvm/test/Transforms/GVN/rle-no-phi-translate.ll +++ b/llvm/test/Transforms/GVN/rle-no-phi-translate.ll @@ -1,5 +1,7 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s ; XFAIL: * + ; FIXME: This should be promotable, but memdep/gvn don't track values ; path/edge sensitively enough. 
@@ -7,22 +9,30 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3 target triple = "i386-apple-darwin7" define i32 @g(ptr %b, ptr %c) nounwind { +; CHECK-LABEL: define i32 @g( +; CHECK-SAME: ptr [[B:%.*]], ptr [[C:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: store i32 1, ptr [[B]], align 4 +; CHECK-NEXT: store i32 2, ptr [[C]], align 4 +; CHECK-NEXT: br i1 false, label %[[BB:.*]], label %[[BB2:.*]] +; CHECK: [[BB]]: +; CHECK-NEXT: br label %[[BB2]] +; CHECK: [[BB2]]: +; CHECK-NEXT: ret i32 [[CV]] +; entry: - store i32 1, ptr %b - store i32 2, ptr %c - - %t1 = icmp eq ptr %b, null ; <i1> [#uses=1] - br i1 %t1, label %bb, label %bb2 + store i32 1, ptr %b + store i32 2, ptr %c + + %t1 = icmp eq ptr %b, null ; <i1> [#uses=1] + br i1 %t1, label %bb, label %bb2 bb: ; preds = %entry - br label %bb2 + br label %bb2 bb2: ; preds = %bb1, %bb - %c_addr.0 = phi ptr [ %b, %entry ], [ %c, %bb ] ; <ptr> [#uses=1] - %cv = load i32, ptr %c_addr.0, align 4 ; <i32> [#uses=1] - ret i32 %cv -; CHECK: bb2: -; CHECK-NOT: load i32 -; CHECK: ret i32 + %c_addr.0 = phi ptr [ %b, %entry ], [ %c, %bb ] ; <ptr> [#uses=1] + %cv = load i32, ptr %c_addr.0, align 4 ; <i32> [#uses=1] + ret i32 %cv } diff --git a/llvm/test/Transforms/GVN/rle-nonlocal.ll b/llvm/test/Transforms/GVN/rle-nonlocal.ll index 06aa188..4cadc40 100644 --- a/llvm/test/Transforms/GVN/rle-nonlocal.ll +++ b/llvm/test/Transforms/GVN/rle-nonlocal.ll @@ -1,22 +1,38 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s define i32 @main(ptr %p, i32 %x, i32 %y) { +; CHECK-LABEL: define i32 @main( +; CHECK-SAME: ptr [[P:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[BLOCK1:.*:]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[Y]] +; CHECK-NEXT: br i1 [[CMP]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]] +; CHECK: [[BLOCK2]]: +; CHECK-NEXT: [[A:%.*]] = load ptr, ptr [[P]], align 8 +; CHECK-NEXT: br label %[[BLOCK4:.*]] +; CHECK: [[BLOCK3]]: +; CHECK-NEXT: [[B:%.*]] = load ptr, ptr [[P]], align 8 +; CHECK-NEXT: br label %[[BLOCK4]] +; CHECK: [[BLOCK4]]: +; CHECK-NEXT: [[DEAD:%.*]] = phi ptr [ [[A]], %[[BLOCK2]] ], [ [[B]], %[[BLOCK3]] ] +; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[DEAD]], align 4 +; CHECK-NEXT: [[E:%.*]] = add i32 [[C]], [[C]] +; CHECK-NEXT: ret i32 [[E]] +; block1: - %cmp = icmp eq i32 %x, %y - br i1 %cmp , label %block2, label %block3 + %cmp = icmp eq i32 %x, %y + br i1 %cmp , label %block2, label %block3 block2: - %a = load ptr, ptr %p - br label %block4 + %a = load ptr, ptr %p + br label %block4 block3: %b = load ptr, ptr %p br label %block4 block4: -; CHECK-NOT: %existingPHI = phi -; CHECK: %DEAD = phi - %existingPHI = phi ptr [ %a, %block2 ], [ %b, %block3 ] + %existingPHI = phi ptr [ %a, %block2 ], [ %b, %block3 ] %DEAD = load ptr, ptr %p %c = load i32, ptr %DEAD %d = load i32, ptr %existingPHI diff --git a/llvm/test/Transforms/GVN/simplify-icf-cache-invalidation.ll b/llvm/test/Transforms/GVN/simplify-icf-cache-invalidation.ll index 8332a98..f4a4155 100644 --- a/llvm/test/Transforms/GVN/simplify-icf-cache-invalidation.ll +++ b/llvm/test/Transforms/GVN/simplify-icf-cache-invalidation.ll @@ -1,7 +1,6 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s -; CHECK: define {{.*}}@eggs - %struct.zot = type { ptr } %struct.wombat = type { ptr } %struct.baz = type { i8, ptr } @@ -11,6 +10,28 @@ declare 
ptr @f() define hidden void @eggs(ptr %arg, i1 %arg2, ptr %arg3, i32 %arg4, ptr %arg5) unnamed_addr align 2 { +; CHECK-LABEL: define hidden void @eggs( +; CHECK-SAME: ptr [[ARG:%.*]], i1 [[ARG2:%.*]], ptr [[ARG3:%.*]], i32 [[ARG4:%.*]], ptr [[ARG5:%.*]]) unnamed_addr align 2 { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_WOMBAT:%.*]], align 8 +; CHECK-NEXT: store ptr @global, ptr [[ARG]], align 8, !invariant.group [[META0:![0-9]+]] +; CHECK-NEXT: br i1 [[ARG2]], label %[[BB4:.*]], label %[[BB2:.*]] +; CHECK: [[BB2]]: +; CHECK-NEXT: [[TMP3:%.*]] = atomicrmw sub ptr [[ARG3]], i32 [[ARG4]] acq_rel, align 4 +; CHECK-NEXT: br label %[[BB4]] +; CHECK: [[BB4]]: +; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARG5]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_BAZ:%.*]], ptr [[TMP5]], i64 0, i32 1 +; CHECK-NEXT: br i1 [[ARG2]], label %[[BB9:.*]], label %[[BB7:.*]] +; CHECK: [[BB7]]: +; CHECK-NEXT: [[TMP8:%.*]] = tail call ptr @f() +; CHECK-NEXT: br label %[[BB9]] +; CHECK: [[BB9]]: +; CHECK-NEXT: tail call void @quux(ptr [[ARG]], i1 [[ARG2]]) +; CHECK-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP]], align 8 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq ptr [[TMP17]], null +; CHECK-NEXT: ret void +; bb: %tmp = alloca %struct.wombat, align 8 store ptr @global, ptr %arg, align 8, !invariant.group !0 @@ -45,3 +66,6 @@ declare hidden void @quux(ptr, i1) unnamed_addr #0 align 2 attributes #0 = { nounwind willreturn } !0 = !{} +;. +; CHECK: [[META0]] = !{} +;. diff --git a/llvm/test/Transforms/GVN/stale-loop-info.ll b/llvm/test/Transforms/GVN/stale-loop-info.ll index 3d6ec67..e253aea 100644 --- a/llvm/test/Transforms/GVN/stale-loop-info.ll +++ b/llvm/test/Transforms/GVN/stale-loop-info.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes='require<loops>,gvn' -S < %s | FileCheck %s ; This used to fail with ASAN enabled and if for some reason LoopInfo remained @@ -14,6 +15,27 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" declare void @snork.1(ptr) local_unnamed_addr #0 define hidden zeroext i1 @eggs(ptr %arg, i1 %arg2, i1 %arg3) unnamed_addr align 2 { +; CHECK-LABEL: define hidden zeroext i1 @eggs( +; CHECK-SAME: ptr [[ARG:%.*]], i1 [[ARG2:%.*]], i1 [[ARG3:%.*]]) unnamed_addr align 2 { +; CHECK-NEXT: [[BB:.*:]] +; CHECK-NEXT: br i1 [[ARG2]], label %[[BB14:.*]], label %[[BB3:.*]] +; CHECK: [[BB3]]: +; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_WIBBLE_1028:%.*]], ptr [[ARG]], i64 0, i32 2, i32 0, i32 0, i64 0 +; CHECK-NEXT: br label %[[BB6:.*]] +; CHECK: [[BB6]]: +; CHECK-NEXT: br i1 [[ARG3]], label %[[BB11:.*]], label %[[BB8:.*]] +; CHECK: [[BB8]]: +; CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP]], align 8 +; CHECK-NEXT: br label %[[BB12:.*]] +; CHECK: [[BB11]]: +; CHECK-NEXT: br label %[[BB12]] +; CHECK: [[BB12]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi ptr [ [[TMP]], %[[BB11]] ], [ [[TMP9]], %[[BB8]] ] +; CHECK-NEXT: call void @snork.1(ptr [[TMP13]]) #[[ATTR1:[0-9]+]] +; CHECK-NEXT: br label %[[BB6]] +; CHECK: [[BB14]]: +; CHECK-NEXT: ret i1 false +; bb: br i1 %arg2, label %bb14, label %bb3 @@ -29,7 +51,6 @@ bb7: ; preds = %bb6 bb8: ; preds = %bb7 %tmp9 = load ptr, ptr %tmp, align 8 -; CHECK: %tmp9 = load ptr, ptr %tmp, align 8 br label %bb12 bb11: ; preds = %bb7 diff --git a/llvm/test/Transforms/GVN/unreachable-predecessor.ll b/llvm/test/Transforms/GVN/unreachable-predecessor.ll index 532d554..a584189 100644 --- 
a/llvm/test/Transforms/GVN/unreachable-predecessor.ll +++ b/llvm/test/Transforms/GVN/unreachable-predecessor.ll @@ -1,13 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -passes=gvn -S | FileCheck %s ; loop.then is not reachable from loop, so we should be able to deduce that the ; store through %phi2 cannot alias %ptr1. - -; CHECK-LABEL: @test1 define void @test1(ptr %ptr1, ptr %ptr2) { -; CHECK-LABEL: entry: -; CHECK: %[[GEP:.*]] = getelementptr inbounds i32, ptr %ptr1, i64 1 -; CHECK: %[[VAL1:.*]] = load i32, ptr %[[GEP]] +; CHECK-LABEL: define void @test1( +; CHECK-SAME: ptr [[PTR1:%.*]], ptr [[PTR2:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 1 +; CHECK-NEXT: [[VAL1_PRE:%.*]] = load i32, ptr [[GEP1]], align 4 +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[PHI1:%.*]] = phi ptr [ [[GEP1]], %[[ENTRY]] ], [ [[PHI2:%.*]], %[[LOOP_THEN:.*]] ] +; CHECK-NEXT: br i1 false, label %[[LOOP_LOOP_THEN_CRIT_EDGE:.*]], label %[[LOOP_IF:.*]] +; CHECK: [[LOOP_LOOP_THEN_CRIT_EDGE]]: +; CHECK-NEXT: br label %[[LOOP_THEN]] +; CHECK: [[LOOP_IF]]: +; CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[GEP1]], i64 1 +; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr [[GEP2]], align 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[VAL1_PRE]], [[VAL2]] +; CHECK-NEXT: br label %[[LOOP_THEN]] +; CHECK: [[LOOP_THEN]]: +; CHECK-NEXT: [[PHI2]] = phi ptr [ poison, %[[LOOP_LOOP_THEN_CRIT_EDGE]] ], [ [[GEP2]], %[[LOOP_IF]] ] +; CHECK-NEXT: store i32 [[VAL1_PRE]], ptr [[PHI2]], align 4 +; CHECK-NEXT: store i32 0, ptr [[PTR1]], align 4 +; CHECK-NEXT: br label %[[LOOP]] +; entry: br label %loop.preheader @@ -15,8 +33,6 @@ loop.preheader: %gep1 = getelementptr inbounds i32, ptr %ptr1, i64 1 br label %loop -; CHECK-LABEL: loop: -; CHECK-NOT: load loop: %phi1 = phi ptr [ %gep1, %loop.preheader ], [ %phi2, %loop.then ] %val1 = load i32, ptr %phi1 @@ -28,8 +44,6 @@ loop.if: %cmp = icmp slt i32 %val1, %val2 br label %loop.then -; CHECK-LABEL: loop.then -; CHECK: store i32 %[[VAL1]], ptr %phi2 loop.then: %phi2 = phi ptr [ %ptr2, %loop ], [ %gep2, %loop.if ] store i32 %val1, ptr %phi2 diff --git a/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll b/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll index 5de5e03..2743fd0 100644 --- a/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll +++ b/llvm/test/Transforms/GVN/unreachable_block_infinite_loop.ll @@ -1,18 +1,40 @@ -; RUN: opt -passes=gvn -disable-output < %s +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -passes=gvn -S < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0" define i32 @test2() nounwind ssp { +; CHECK-LABEL: define i32 @test2( +; CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: ret i32 0 +; CHECK: [[UNREACHABLE_BLOCK:.*:]] +; CHECK-NEXT: [[A:%.*]] = add i32 [[A]], 1 +; CHECK-NEXT: ret i32 [[A]] +; entry: - ret i32 0 + ret i32 0 unreachable_block: - %a = add i32 %a, 1 - ret i32 %a + %a = add i32 %a, 1 + ret i32 %a } define i32 @pr23096_test0(i1 %arg, ptr %arg2) { +; CHECK-LABEL: define i32 @pr23096_test0( +; CHECK-SAME: i1 [[ARG:%.*]], ptr [[ARG2:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label 
%[[BB0:.*]] +; CHECK: [[BB1:.*]]: +; CHECK-NEXT: [[PTR1:%.*]] = ptrtoint ptr [[PTR2:%.*]] to i64 +; CHECK-NEXT: [[PTR2]] = inttoptr i64 [[PTR1]] to ptr +; CHECK-NEXT: br i1 [[ARG]], label %[[BB0]], label %[[BB1]] +; CHECK: [[BB0]]: +; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[ARG2]], %[[ENTRY]] ], [ [[PTR2]], %[[BB1]] ] +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[PHI]], align 4 +; CHECK-NEXT: ret i32 [[LOAD]] +; entry: br label %bb0 @@ -28,6 +50,19 @@ bb0: } define i32 @pr23096_test1(i1 %arg, ptr %arg2) { +; CHECK-LABEL: define i32 @pr23096_test1( +; CHECK-SAME: i1 [[ARG:%.*]], ptr [[ARG2:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[BB0:.*]] +; CHECK: [[BB1:.*]]: +; CHECK-NEXT: [[PTR1:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i32 0 +; CHECK-NEXT: [[PTR2]] = getelementptr i32, ptr [[PTR1]], i32 0 +; CHECK-NEXT: br i1 [[ARG]], label %[[BB0]], label %[[BB1]] +; CHECK: [[BB0]]: +; CHECK-NEXT: [[PHI:%.*]] = phi ptr [ [[ARG2]], %[[ENTRY]] ], [ [[PTR2]], %[[BB1]] ] +; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[PHI]], align 4 +; CHECK-NEXT: ret i32 [[LOAD]] +; entry: br label %bb0 diff --git a/llvm/test/Transforms/GVN/volatile-nonvolatile.ll b/llvm/test/Transforms/GVN/volatile-nonvolatile.ll index 72c6a30..d34c891 100644 --- a/llvm/test/Transforms/GVN/volatile-nonvolatile.ll +++ b/llvm/test/Transforms/GVN/volatile-nonvolatile.ll @@ -1,13 +1,19 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -passes=gvn -S < %s | FileCheck %s %struct.t = type { ptr } ; The loaded address and the location of the address itself are not aliased, ; so the second reload is not necessary. Check that it can be eliminated. -; CHECK-LABEL: test1 -; CHECK: load -; CHECK-NOT: load define void @test1(ptr nocapture readonly %p, i32 %v) #0 { +; CHECK-LABEL: define void @test1( +; CHECK-SAME: ptr readonly captures(none) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[ANYPTR_TBAA0:![0-9]+]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[INT_TBAA5:![0-9]+]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[INT_TBAA5]] +; CHECK-NEXT: ret void +; entry: %0 = load ptr, ptr %p, align 4, !tbaa !1 store volatile i32 %v, ptr %0, align 4, !tbaa !6 @@ -18,11 +24,16 @@ entry: ; The store via the loaded address may overwrite the address itself. ; Make sure that both loads remain. -; CHECK-LABEL: test2 -; CHECK: load -; CHECK: store -; CHECK: load define void @test2(ptr nocapture readonly %p, i32 %v) #0 { +; CHECK-LABEL: define void @test2( +; CHECK-SAME: ptr readonly captures(none) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[P]], align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP1]], align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: ret void +; entry: %0 = load ptr, ptr %p, align 4, !tbaa !1 store volatile i32 %v, ptr %0, align 4, !tbaa !1 @@ -33,11 +44,16 @@ entry: ; The loads are ordered and non-monotonic. Although they are not aliased to ; the stores, make sure both are preserved. 
-; CHECK-LABEL: test3 -; CHECK: load -; CHECK: store -; CHECK: load define void @test3(ptr nocapture readonly %p, i32 %v) #0 { +; CHECK-LABEL: define void @test3( +; CHECK-SAME: ptr readonly captures(none) [[P:%.*]], i32 [[V:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = load atomic ptr, ptr [[P]] acquire, align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP0]], align 4, !tbaa [[INT_TBAA5]] +; CHECK-NEXT: [[TMP1:%.*]] = load atomic ptr, ptr [[P]] acquire, align 4, !tbaa [[ANYPTR_TBAA0]] +; CHECK-NEXT: store volatile i32 [[V]], ptr [[TMP1]], align 4, !tbaa [[INT_TBAA5]] +; CHECK-NEXT: ret void +; entry: %0 = load atomic ptr, ptr %p acquire, align 4, !tbaa !1 store volatile i32 %v, ptr %0, align 4, !tbaa !6 @@ -56,3 +72,12 @@ attributes #0 = { norecurse nounwind } !6 = !{!7, !7, i64 0} !7 = !{!"int", !4, i64 0} +;. +; CHECK: [[ANYPTR_TBAA0]] = !{[[META1:![0-9]+]], [[META2:![0-9]+]], i64 0} +; CHECK: [[META1]] = !{!"", [[META2]], i64 0} +; CHECK: [[META2]] = !{!"any pointer", [[META3:![0-9]+]], i64 0} +; CHECK: [[META3]] = !{!"omnipotent char", [[META4:![0-9]+]], i64 0} +; CHECK: [[META4]] = !{!"Simple C/C++ TBAA"} +; CHECK: [[INT_TBAA5]] = !{[[META6:![0-9]+]], [[META6]], i64 0} +; CHECK: [[META6]] = !{!"int", [[META3]], i64 0} +;. diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll index 26728a7..70c8fe6 100644 --- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll +++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users-gep-constexpr.ll @@ -44,16 +44,6 @@ entry: ret void } -define void @stores_ptrtoint_constexpr() { -; CHECK-LABEL: @stores_ptrtoint_constexpr( -; CHECK-NEXT: entry: -; CHECK-NEXT: ret void -; -entry: - store i32 0, ptr inttoptr (i64 ptrtoint (ptr @global.20ptr to i64) to ptr), align 8 - ret void -} - @gv = internal unnamed_addr global [3 x ptr] zeroinitializer, align 16 @gv2 = internal unnamed_addr global i32 0, align 4 diff --git a/llvm/test/Transforms/HardwareLoops/ARM/structure.ll b/llvm/test/Transforms/HardwareLoops/ARM/structure.ll index cb66fef..6993fd1 100644 --- a/llvm/test/Transforms/HardwareLoops/ARM/structure.ll +++ b/llvm/test/Transforms/HardwareLoops/ARM/structure.ll @@ -321,10 +321,10 @@ for.inc: ; preds = %sw.bb, %sw.bb1, %fo ; CHECK-UNROLL-NOT: dls ; CHECK-UNROLL: [[LOOP:.LBB[0-9_]+]]: @ %for.body ; CHECK-UNROLL: le lr, [[LOOP]] -; CHECK-UNROLL: wls lr, r12, [[EXIT:.LBB[0-9_]+]] +; CHECK-UNROLL: dls lr, r12 ; CHECK-UNROLL: [[EPIL:.LBB[0-9_]+]]: ; CHECK-UNROLL: le lr, [[EPIL]] -; CHECK-UNROLL-NEXT: [[EXIT]] +; CHECK-UNROLL-NEXT: {{\.LBB[0-9_]+}}: @ %for.cond.cleanup define void @unroll_inc_int(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) { entry: @@ -357,10 +357,10 @@ for.body: ; CHECK-UNROLL-NOT: dls ; CHECK-UNROLL: [[LOOP:.LBB[0-9_]+]]: @ %for.body ; CHECK-UNROLL: le lr, [[LOOP]] -; CHECK-UNROLL: wls lr, r12, [[EPIL_EXIT:.LBB[0-9_]+]] +; CHECK-UNROLL: dls lr, r12 ; CHECK-UNROLL: [[EPIL:.LBB[0-9_]+]]: ; CHECK-UNROLL: le lr, [[EPIL]] -; CHECK-UNROLL: [[EPIL_EXIT]]: +; CHECK-UNROLL: {{\.LBB[0-9_]+}}: @ %for.cond.cleanup ; CHECK-UNROLL: pop define void @unroll_inc_unsigned(ptr nocapture %a, ptr nocapture readonly %b, ptr nocapture readonly %c, i32 %N) { entry: diff --git a/llvm/test/Transforms/Inline/attributes.ll b/llvm/test/Transforms/Inline/attributes.ll index 42b1a3a..55ab430 100644 --- 
a/llvm/test/Transforms/Inline/attributes.ll +++ b/llvm/test/Transforms/Inline/attributes.ll @@ -26,6 +26,10 @@ define i32 @sanitize_memtag_callee(i32 %i) sanitize_memtag { ret i32 %i } +define i32 @sanitize_alloc_token_callee(i32 %i) sanitize_alloc_token { + ret i32 %i +} + define i32 @safestack_callee(i32 %i) safestack { ret i32 %i } @@ -58,6 +62,10 @@ define i32 @alwaysinline_sanitize_memtag_callee(i32 %i) alwaysinline sanitize_me ret i32 %i } +define i32 @alwaysinline_sanitize_alloc_token_callee(i32 %i) alwaysinline sanitize_alloc_token { + ret i32 %i +} + define i32 @alwaysinline_safestack_callee(i32 %i) alwaysinline safestack { ret i32 %i } @@ -184,6 +192,39 @@ define i32 @test_sanitize_memtag(i32 %arg) sanitize_memtag { ; CHECK-NEXT: ret i32 } +; ---------------------------------------------------------------------------- ; + +; Can inline sanitize_alloc_token functions into a noattr function. The +; attribute is *not* viral, otherwise may break code. +define i32 @test_no_sanitize_alloc_token(i32 %arg) { +; CHECK-LABEL: @test_no_sanitize_alloc_token( +; CHECK-SAME: ) { +; CHECK-NOT: call +; CHECK: ret i32 +entry: + %x1 = call i32 @noattr_callee(i32 %arg) + %x2 = call i32 @sanitize_alloc_token_callee(i32 %x1) + %x3 = call i32 @alwaysinline_callee(i32 %x2) + %x4 = call i32 @alwaysinline_sanitize_alloc_token_callee(i32 %x3) + ret i32 %x4 +} + +; Can inline noattr functions into a sanitize_alloc_token function. If +; inlinable noattr functions cannot be instrumented, they should be marked with +; explicit noinline. +define i32 @test_sanitize_alloc_token(i32 %arg) sanitize_alloc_token { +; CHECK-LABEL: @test_sanitize_alloc_token( +; CHECK-SAME: ) [[SANITIZE_ALLOC_TOKEN:.*]] { +; CHECK-NOT: call +; CHECK: ret i32 +entry: + %x1 = call i32 @noattr_callee(i32 %arg) + %x2 = call i32 @sanitize_alloc_token_callee(i32 %x1) + %x3 = call i32 @alwaysinline_callee(i32 %x2) + %x4 = call i32 @alwaysinline_sanitize_alloc_token_callee(i32 %x3) + ret i32 %x4 +} + define i32 @test_safestack(i32 %arg) safestack { %x1 = call i32 @noattr_callee(i32 %arg) %x2 = call i32 @safestack_callee(i32 %x1) @@ -639,6 +680,7 @@ define i32 @loader_replaceable_caller() { ret i32 %1 } +; CHECK: attributes [[SANITIZE_ALLOC_TOKEN]] = { sanitize_alloc_token } ; CHECK: attributes [[SLH]] = { speculative_load_hardening } ; CHECK: attributes [[FPMAD_FALSE]] = { "less-precise-fpmad"="false" } ; CHECK: attributes [[FPMAD_TRUE]] = { "less-precise-fpmad"="true" } diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll index 361a2b8..378ca1f 100644 --- a/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll +++ b/llvm/test/Transforms/InstCombine/AMDGPU/fmed3.ll @@ -269,42 +269,27 @@ define float @fmed3_constant_src2_1_f32(float %x, float %y) #1 { } define float @fmed3_x_qnan0_qnan1_f32(float %x) #1 { -; IEEE1-LABEL: define float @fmed3_x_qnan0_qnan1_f32( -; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE1-NEXT: ret float [[X]] -; -; IEEE0-LABEL: define float @fmed3_x_qnan0_qnan1_f32( -; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000) -; IEEE0-NEXT: ret float [[MED3]] +; CHECK-LABEL: define float @fmed3_x_qnan0_qnan1_f32( +; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: ret float [[X]] ; %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF8001000000000, float 0x7FF8002000000000) ret float %med3 } define float @fmed3_qnan0_x_qnan1_f32(float %x) #1 { -; IEEE1-LABEL: 
define float @fmed3_qnan0_x_qnan1_f32( -; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE1-NEXT: ret float [[X]] -; -; IEEE0-LABEL: define float @fmed3_qnan0_x_qnan1_f32( -; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000) -; IEEE0-NEXT: ret float [[MED3]] +; CHECK-LABEL: define float @fmed3_qnan0_x_qnan1_f32( +; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: ret float [[X]] ; %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float %x, float 0x7FF8002000000000) ret float %med3 } define float @fmed3_qnan0_qnan1_x_f32(float %x) #1 { -; IEEE1-LABEL: define float @fmed3_qnan0_qnan1_x_f32( -; IEEE1-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE1-NEXT: ret float [[X]] -; -; IEEE0-LABEL: define float @fmed3_qnan0_qnan1_x_f32( -; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF8002000000000) -; IEEE0-NEXT: ret float [[MED3]] +; CHECK-LABEL: define float @fmed3_qnan0_qnan1_x_f32( +; CHECK-SAME: float [[X:%.*]]) #[[ATTR1]] { +; CHECK-NEXT: ret float [[X]] ; %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF8001000000000, float 0x7FF8002000000000, float %x) ret float %med3 @@ -448,8 +433,7 @@ define float @fmed3_snan1_x_snan2_f32(float %x) #1 { ; ; IEEE0-LABEL: define float @fmed3_snan1_x_snan2_f32( ; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000040000000) -; IEEE0-NEXT: ret float [[MED3]] +; IEEE0-NEXT: ret float [[X]] ; %med3 = call float @llvm.amdgcn.fmed3.f32(float 0x7FF0000020000000, float %x, float 0x7FF0000040000000) ret float %med3 @@ -462,8 +446,7 @@ define float @fmed3_x_snan1_snan2_f32(float %x) #1 { ; ; IEEE0-LABEL: define float @fmed3_x_snan1_snan2_f32( ; IEEE0-SAME: float [[X:%.*]]) #[[ATTR1]] { -; IEEE0-NEXT: [[MED3:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000040000000) -; IEEE0-NEXT: ret float [[MED3]] +; IEEE0-NEXT: ret float [[X]] ; %med3 = call float @llvm.amdgcn.fmed3.f32(float %x, float 0x7FF0000020000000, float 0x7FF0000040000000) ret float %med3 diff --git a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll index 26b5114..3a03f86 100644 --- a/llvm/test/Transforms/InstSimplify/fminmax-folds.ll +++ b/llvm/test/Transforms/InstSimplify/fminmax-folds.ll @@ -6,12 +6,12 @@ ;############################################################### ; minnum(X, qnan) -> X ; maxnum(X, qnan) -> X -; TODO: minnum(X, snan) -> qnan (currently we treat SNaN the same as QNaN) -; TODO: maxnum(X, snan) -> qnan (currently we treat SNaN the same as QNaN) +; minnum(X, snan) -> qnan +; maxnum(X, snan) -> qnan ; minimum(X, nan) -> qnan ; maximum(X, nan) -> qnan -; TODO: minimumnum(X, nan) -> X -; TODO: maximumnum(X, nan) -> X +; minimumnum(X, nan) -> X +; maximumnum(X, nan) -> X define void @minmax_qnan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_qnan_f32( @@ -19,10 +19,8 @@ define void @minmax_qnan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %mi ; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 ; CHECK-NEXT: store float 0x7FFF000000000000, ptr [[MINIMUM_RES:%.*]], align 4 ; CHECK-NEXT: store float 0x7FFF000000000000, ptr [[MAXIMUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float 
@llvm.minimumnum.f32(float [[X]], float 0x7FFF000000000000) -; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FFF000000000000) -; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void ; %minnum = call float @llvm.minnum.f32(float %x, float 0x7FFF000000000000) @@ -42,17 +40,15 @@ define void @minmax_qnan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %mi ret void } -; TODO currently snan is treated the same as qnan, but maxnum/minnum should really return qnan for these cases, not X +; Note that maxnum/minnum return qnan here for snan inputs, unlike maximumnum/minimumnum define void @minmax_snan_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_snan_f32( -; CHECK-NEXT: store float [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 4 -; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MINNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MAXNUM_RES:%.*]], align 4 ; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MINIMUM_RES:%.*]], align 4 ; CHECK-NEXT: store float 0x7FFC000000000000, ptr [[MAXIMUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF4000000000000) -; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FF4000000000000) -; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X:%.*]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void ; %minnum = call float @llvm.minnum.f32(float %x, float 0x7FF4000000000000) @@ -78,10 +74,8 @@ define void @minmax_qnan_nxv2f64_op0(<vscale x 2 x double> %x, ptr %minnum_res, ; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FF8000DEAD00000), ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FF8000DEAD00000), ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF8000DEAD00000), <vscale x 2 x double> [[X]]) -; CHECK-NEXT: store <vscale x 2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF8000DEAD00000), <vscale x 2 x double> [[X]]) -; CHECK-NEXT: store <vscale x 2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF8000DEAD00000), <vscale x 2 x double> %x) @@ -101,17 +95,15 @@ define void @minmax_qnan_nxv2f64_op0(<vscale x 2 x double> %x, ptr %minnum_res, ret void } -; TODO currently 
snan is treated the same as qnan, but maxnum/minnum should really return qnan for these cases, not X +; Note that maxnum/minnum return qnan here for snan inputs, unlike maximumnum/minimumnum define void @minmax_snan_nxv2f64_op1(<vscale x 2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_snan_nxv2f64_op1( -; CHECK-NEXT: store <vscale x 2 x double> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 -; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FFC00DEAD00DEAD), ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FFC00DEAD00DEAD), ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FFC00DEAD00DEAD), ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> splat (double 0x7FFC00DEAD00DEAD), ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF400DEAD00DEAD), <vscale x 2 x double> [[X]]) -; CHECK-NEXT: store <vscale x 2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF400DEAD00DEAD), <vscale x 2 x double> [[X]]) -; CHECK-NEXT: store <vscale x 2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X:%.*]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> splat (double 0x7FF400DEAD00DEAD), <vscale x 2 x double> %x) @@ -131,17 +123,18 @@ define void @minmax_snan_nxv2f64_op1(<vscale x 2 x double> %x, ptr %minnum_res, ret void } -; TODO Currently, we treat SNaN and QNaN the same. However, for maxnum and minnum, we should not optimize this, as we should return <%x0, QNaN> instead of <%x0, %x1> +; For maxnum and minnum, we cannot optimize this in InstSimplify, as the result should +; return <%x0, QNaN> and InstSimplify cannot create the extra instructions required to construct this. 
define void @minmax_mixed_snan_qnan_v2f64(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_mixed_snan_qnan_v2f64( -; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 -; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINNUM:%.*]] = call <2 x double> @llvm.minnum.v2f64(<2 x double> <double 0x7FF400DEAD00DEAD, double 0x7FF8000FEED00000>, <2 x double> [[X:%.*]]) +; CHECK-NEXT: store <2 x double> [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXNUM:%.*]] = call <2 x double> @llvm.maxnum.v2f64(<2 x double> <double 0x7FF400DEAD00DEAD, double 0x7FF8000FEED00000>, <2 x double> [[X]]) +; CHECK-NEXT: store <2 x double> [[MAXNUM]], ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> <double 0x7FFC00DEAD00DEAD, double 0x7FF8000FEED00000>, ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> <double 0x7FFC00DEAD00DEAD, double 0x7FF8000FEED00000>, ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double 0x7FF400DEAD00DEAD, double 0x7FF8000FEED00000>, <2 x double> [[X]]) -; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> <double 0x7FF400DEAD00DEAD, double 0x7FF8000FEED00000>, <2 x double> [[X]]) -; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call <2 x double> @llvm.minnum.v2f64(<2 x double> <double 0x7FF400DEAD00DEAD, double 0x7FF8000FEED00000>, <2 x double> %x) @@ -169,10 +162,8 @@ define void @minmax_mixed_qnan_poison_v2f64(<2 x double> %x, ptr %minnum_res, pt ; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> <double poison, double 0x7FF8000DEAD00000>, ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> <double poison, double 0x7FF8000DEAD00000>, ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double poison, double 0x7FF8000DEAD00000>, <2 x double> [[X]]) -; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> <double poison, double 0x7FF8000DEAD00000>, <2 x double> [[X]]) -; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call <2 x double> @llvm.minnum.v2f64(<2 x double> <double poison, double 0x7FF8000DEAD00000>, <2 x double> %x) @@ -201,10 +192,8 @@ define void @minmax_poison_op0_f16(half %x, ptr %minnum_res, ptr %maxnum_res, pt ; CHECK-NEXT: store half [[X]], ptr [[MAXNUM_RES:%.*]], align 2 ; CHECK-NEXT: store half [[X]], ptr [[MINIMUM_RES:%.*]], align 2 ; CHECK-NEXT: store half [[X]], ptr [[MAXIMUM_RES:%.*]], align 2 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call half @llvm.minimumnum.f16(half poison, half [[X]]) -; CHECK-NEXT: store half [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], 
align 2 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call half @llvm.maximumnum.f16(half poison, half [[X]]) -; CHECK-NEXT: store half [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 ; CHECK-NEXT: ret void ; %minnum = call half @llvm.minnum.f16(half poison, half %x) @@ -230,10 +219,8 @@ define void @minmax_poison_op1_nxv2f64(<vscale x 2 x double> %x, ptr %minnum_res ; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan <vscale x 2 x double> @llvm.minimumnum.nxv2f64(<vscale x 2 x double> [[X]], <vscale x 2 x double> poison) -; CHECK-NEXT: store <vscale x 2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <vscale x 2 x double> @llvm.maximumnum.nxv2f64(<vscale x 2 x double> [[X]], <vscale x 2 x double> poison) -; CHECK-NEXT: store <vscale x 2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <vscale x 2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call nnan <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> poison) @@ -260,10 +247,10 @@ define void @minmax_poison_op1_nxv2f64(<vscale x 2 x double> %x, ptr %minnum_res ; minnum(X, +inf) -> X if nnan (ignoring NaN quieting) ; maximum(X, +inf) -> +inf if nnan ; minimum(X, +inf) -> X (ignoring NaN quieting) -; TODO: maximumnum(X, +inf) -> +inf -; TODO: minimumnum(X, +inf) -> X if nnan (ignoring NaN quieting) +; maximumnum(X, +inf) -> +inf +; minimumnum(X, +inf) -> X if nnan (ignoring NaN quieting) -; Can only optimize maxnum and minimum without the nnan flag +; Can only optimize maxnum, minimum, and maximumnum without the nnan flag define void @minmax_pos_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_pos_inf_f32( ; CHECK-NEXT: [[MINNUM:%.*]] = call float @llvm.minnum.f32(float [[X:%.*]], float 0x7FF0000000000000) @@ -274,8 +261,7 @@ define void @minmax_pos_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr ; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 ; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0x7FF0000000000000) ; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0x7FF0000000000000) -; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x7FF0000000000000, ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void ; %minnum = call float @llvm.minnum.f32(float %x, float 0x7FF0000000000000) @@ -296,17 +282,14 @@ define void @minmax_pos_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr } ; Can optimize all minmax variants if the nnan flag is set -; TODO maximumnum/minimumnum define void @minmax_pos_inf_nnan_v2f32(<2 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: 
@minmax_pos_inf_nnan_v2f32( ; CHECK-NEXT: store <2 x float> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> splat (float 0x7FF0000000000000), ptr [[MAXNUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> splat (float 0x7FF0000000000000), ptr [[MAXIMUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan <2 x float> @llvm.minimumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> [[X]]) -; CHECK-NEXT: store <2 x float> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <2 x float> @llvm.maximumnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> [[X]]) -; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x7FF0000000000000), ptr [[MAXIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: ret void ; %minnum = call nnan <2 x float> @llvm.minnum.v2f32(<2 x float> splat (float 0x7FF0000000000000), <2 x float> %x) @@ -333,10 +316,10 @@ define void @minmax_pos_inf_nnan_v2f32(<2 x float> %x, ptr %minnum_res, ptr %max ; maxnum(X, -inf) -> X if nnan ; minimum(X, -inf) -> -inf if nnan ; maximum(X, -inf) -> X (Ignoring NaN quieting) -; TODO: minimumnum(X, -inf) -> -inf -; TODO: maximumnum(X, -inf) -> X if nnan +; minimumnum(X, -inf) -> -inf +; maximumnum(X, -inf) -> X if nnan -; Can only optimize minnum and maximum without the nnan flag +; Can only optimize minnum, maximum, and minimumnum without the nnan flag define void @minmax_neg_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_neg_inf_f32( ; CHECK-NEXT: store float 0xFFF0000000000000, ptr [[MINNUM_RES:%.*]], align 4 @@ -345,8 +328,7 @@ define void @minmax_neg_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr ; CHECK-NEXT: [[MINIMUM:%.*]] = call float @llvm.minimum.f32(float [[X]], float 0xFFF0000000000000) ; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 ; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float 0xFFF0000000000000) -; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0xFFF0000000000000, ptr [[MINIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float 0xFFF0000000000000) ; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void @@ -369,17 +351,14 @@ define void @minmax_neg_inf_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr } ; Can optimize all minmax variants if the nnan flag is set -; TODO maximumnum/minimumnum define void @minmax_neg_inf_nnan_v2f64(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_neg_inf_nnan_v2f64( ; CHECK-NEXT: store <2 x double> splat (double 0xFFF0000000000000), ptr [[MINNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MAXNUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> splat (double 0xFFF0000000000000), ptr [[MINIMUM_RES:%.*]], align 16 ; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXIMUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan <2 x double> 
@llvm.minimumnum.v2f64(<2 x double> [[X]], <2 x double> splat (double 0xFFF0000000000000)) -; CHECK-NEXT: store <2 x double> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 16 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> [[X]], <2 x double> splat (double 0xFFF0000000000000)) -; CHECK-NEXT: store <2 x double> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> splat (double 0xFFF0000000000000), ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 ; CHECK-NEXT: ret void ; %minnum = call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> %x, <2 x double> splat (double 0xFFF0000000000000)) @@ -406,8 +385,8 @@ define void @minmax_neg_inf_nnan_v2f64(<2 x double> %x, ptr %minnum_res, ptr %ma ; minnum(X, +largest) -> X if ninf && nnan ; maximum(X, +largest) -> +largest if ninf && nnan ; minimum(X, +largest) -> X if ninf (ignoring quieting of sNaNs) -; TODO: maximumnum(X, +largest) -> +largest if ninf && nnan -; TODO: minimumnum(X, +largest) -> X if ninf && nnan +; maximumnum(X, +largest) -> +largest if ninf +; minimumnum(X, +largest) -> X if ninf && nnan ; None of these should be optimized away without the nnan/ninf flags define void @minmax_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { @@ -443,7 +422,7 @@ define void @minmax_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr ret void } -; We can optimize maxnum and minimum if we know ninf is set +; We can optimize maxnum, minimum, and maximumnum if we know ninf is set define void @minmax_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_largest_f32_ninf( ; CHECK-NEXT: [[MINNUM:%.*]] = call ninf float @llvm.minnum.f32(float [[X:%.*]], float 0x47EFFFFFE0000000) @@ -454,8 +433,7 @@ define void @minmax_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, ; CHECK-NEXT: store float [[MAXIMUM]], ptr [[MAXIMUM_RES:%.*]], align 4 ; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call ninf float @llvm.minimumnum.f32(float [[X]], float 0x47EFFFFFE0000000) ; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call ninf float @llvm.maximumnum.f32(float [[X]], float 0x47EFFFFFE0000000) -; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0x47EFFFFFE0000000, ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void ; %minnum = call ninf float @llvm.minnum.f32(float %x, float 0x47EFFFFFE0000000) @@ -476,17 +454,14 @@ define void @minmax_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, } ; All can be optimized if both the ninf and nnan flags are set (ignoring SNaN propagation in minnum/maxnum) -; TODO maximumnum/minimumnum define void @minmax_largest_v2f32_ninf_nnan(<2 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_largest_v2f32_ninf_nnan( ; CHECK-NEXT: store <2 x float> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> splat (float 0x47EFFFFFE0000000), ptr [[MAXNUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUM_RES:%.*]], align 8 ; CHECK-NEXT: store <2 x float> splat (float 0x47EFFFFFE0000000), ptr [[MAXIMUM_RES:%.*]], align 8 -; CHECK-NEXT: 
[[MINIMUMNUM:%.*]] = call nnan ninf <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> splat (float 0x47EFFFFFE0000000)) -; CHECK-NEXT: store <2 x float> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan ninf <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> splat (float 0x47EFFFFFE0000000)) -; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> splat (float 0x47EFFFFFE0000000), ptr [[MAXIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: ret void ; %minnum = call ninf nnan <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> splat (float 0x47EFFFFFE0000000)) @@ -513,8 +488,8 @@ define void @minmax_largest_v2f32_ninf_nnan(<2 x float> %x, ptr %minnum_res, ptr ; minnum(X, -largest) -> -largest if ninf (ignoring SNaN -> QNaN propagation) ; maximum(X, -largest) -> X if ninf (ignoring quieting of sNaNs) ; minimum(X, -largest) -> -largest if ninf && nnan -; TODO: maximumnum(X, -largest) -> X if ninf && nnan -; TODO: minimumnum(X, -largest) -> -largest if ninf +; maximumnum(X, -largest) -> X if ninf && nnan +; minimumnum(X, -largest) -> -largest if ninf ; None of these should be optimized away without the nnan/ninf flags define void @minmax_neg_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { @@ -550,7 +525,7 @@ define void @minmax_neg_largest_f32(float %x, ptr %minnum_res, ptr %maxnum_res, ret void } -; We can optimize minnum and maximum if we know ninf is set +; We can optimize minnum, maximum, and minimumnum if we know ninf is set define void @minmax_neg_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_neg_largest_f32_ninf( ; CHECK-NEXT: store float 0xC7EFFFFFE0000000, ptr [[MINNUM_RES:%.*]], align 4 @@ -559,8 +534,7 @@ define void @minmax_neg_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_ ; CHECK-NEXT: [[MINIMUM:%.*]] = call ninf float @llvm.minimum.f32(float [[X]], float 0xC7EFFFFFE0000000) ; CHECK-NEXT: store float [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 4 ; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call ninf float @llvm.minimumnum.f32(float [[X]], float 0xC7EFFFFFE0000000) -; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float 0xC7EFFFFFE0000000, ptr [[MINIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call ninf float @llvm.maximumnum.f32(float [[X]], float 0xC7EFFFFFE0000000) ; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void @@ -583,17 +557,14 @@ define void @minmax_neg_largest_f32_ninf(float %x, ptr %minnum_res, ptr %maxnum_ } ; All can be optimized if both the ninf and nnan flags are set (ignoring SNaN propagation in minnum/maxnum) -; TODO maximumnum/minimumnum define void @minmax_neg_largest_nxv2f32_nnan_ninf(<vscale x 2 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { ; CHECK-LABEL: @minmax_neg_largest_nxv2f32_nnan_ninf( ; CHECK-NEXT: store <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000), ptr [[MINNUM_RES:%.*]], align 8 ; CHECK-NEXT: store <vscale x 2 x float> [[X:%.*]], ptr [[MAXNUM_RES:%.*]], align 8 ; 
CHECK-NEXT: store <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000), ptr [[MINIMUM_RES:%.*]], align 8 ; CHECK-NEXT: store <vscale x 2 x float> [[X]], ptr [[MAXIMUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call nnan ninf <vscale x 2 x float> @llvm.minimumnum.nxv2f32(<vscale x 2 x float> [[X]], <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000)) -; CHECK-NEXT: store <vscale x 2 x float> [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan ninf <vscale x 2 x float> @llvm.maximumnum.nxv2f32(<vscale x 2 x float> [[X]], <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000)) -; CHECK-NEXT: store <vscale x 2 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000), ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <vscale x 2 x float> [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: ret void ; %minnum = call nnan ninf <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x float> splat (float 0xC7EFFFFFE0000000)) @@ -614,6 +585,80 @@ define void @minmax_neg_largest_nxv2f32_nnan_ninf(<vscale x 2 x float> %x, ptr % } ;############################################################### +;# Mixed Constant Vector Elements # +;############################################################### +; Tests elementwise handling of different combinations of the above optimizable constants + +; Test with vector variants (v2f64) with +Inf and poison +; Poison element allows for flexibility to choose either X or <poison, +Inf> where applicable +define void @minmax_mixed_pos_inf_poison_v2f64_nnan(<2 x double> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_mixed_pos_inf_poison_v2f64_nnan( +; CHECK-NEXT: store <2 x double> [[X:%.*]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> <double poison, double 0x7FF0000000000000>, ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> <double poison, double 0x7FF0000000000000>, ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <2 x double> <double poison, double 0x7FF0000000000000>, ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call nnan <2 x double> @llvm.minnum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %minnum, ptr %minnum_res + %maxnum = call nnan <2 x double> @llvm.maxnum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %maxnum, ptr %maxnum_res + + %minimum = call nnan <2 x double> @llvm.minimum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %minimum, ptr %minimum_res + %maximum = call nnan <2 x double> @llvm.maximum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %maximum, ptr %maximum_res + + %minimumnum = call nnan <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan <2 x double> @llvm.maximumnum.v2f64(<2 x double> <double poison, double 0x7FF0000000000000>, <2 x double> %x) + store <2 x double> %maximumnum, ptr %maximumnum_res + ret 
void +} + +; Tests to show that we can optimize different classes of constant (inf/nan/poison) in different vector elements. +; We can only optimize if the result would choose all elements of the input X, or all constant elements +; (where poison allows us to choose either). +; +; nnan minnum(<poison, +Inf, SNaN>, X) = <???, X1, QNaN> (Cannot mix elements from X and constant vector) +; nnan maxnum(<poison, +Inf, SNaN>, X) = <poison, +Inf, QNaN> +; nnan minimum(<poison, +Inf, SNaN>, X) = <???, X1, QNaN> (Cannot mix elements from X and constant vector) +; nnan maximum(<poison, +Inf, SNaN>, X) = <poison, +Inf, QNaN> +; nnan minimumnum(<poison, +Inf, SNaN>, X) = <X0, X1, X2> (Poison can be either X or constant value) +; nnan maximumnum(<poison, +Inf, SNaN>, X) = <???, +Inf, X2> +define void @minmax_mixed_pos_inf_poison_snan_v3f32(<3 x float> %x, ptr %minnum_res, ptr %maxnum_res, ptr %minimum_res, ptr %maximum_res, ptr %minimumnum_res, ptr %maximumnum_res) { +; CHECK-LABEL: @minmax_mixed_pos_inf_poison_snan_v3f32( +; CHECK-NEXT: [[MINNUM:%.*]] = call nnan <3 x float> @llvm.minnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> [[X:%.*]]) +; CHECK-NEXT: store <3 x float> [[MINNUM]], ptr [[MINNUM_RES:%.*]], align 16 +; CHECK-NEXT: store <3 x float> <float poison, float 0x7FF0000000000000, float 0x7FFC000000000000>, ptr [[MAXNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MINIMUM:%.*]] = call nnan <3 x float> @llvm.minimum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> [[X]]) +; CHECK-NEXT: store <3 x float> [[MINIMUM]], ptr [[MINIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <3 x float> <float poison, float 0x7FF0000000000000, float 0x7FFC000000000000>, ptr [[MAXIMUM_RES:%.*]], align 16 +; CHECK-NEXT: store <3 x float> [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call nnan <3 x float> @llvm.maximumnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> [[X]]) +; CHECK-NEXT: store <3 x float> [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 16 +; CHECK-NEXT: ret void +; + %minnum = call nnan <3 x float> @llvm.minnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %minnum, ptr %minnum_res + %maxnum = call nnan <3 x float> @llvm.maxnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %maxnum, ptr %maxnum_res + + %minimum = call nnan <3 x float> @llvm.minimum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %minimum, ptr %minimum_res + %maximum = call nnan <3 x float> @llvm.maximum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %maximum, ptr %maximum_res + + %minimumnum = call nnan <3 x float> @llvm.minimumnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %minimumnum, ptr %minimumnum_res + %maximumnum = call nnan <3 x float> @llvm.maximumnum.v3f32(<3 x float> <float poison, float 0x7FF0000000000000, float 0x7FF4000000000000>, <3 x float> %x) + store <3 x float> %maximumnum, ptr %maximumnum_res + ret void +} + +;############################################################### +;# Min(x, x) / Max(x, x) #
;############################################################### ; min(x, x) -> x and max(x, x) -> x for all variants (ignoring SNaN quieting) @@ -623,10 +668,8 @@ define void @minmax_same_args(float %x, ptr %minnum_res, ptr %maxnum_res, ptr %m ; CHECK-NEXT: store float [[X]], ptr [[MAXNUM_RES:%.*]], align 4 ; CHECK-NEXT: store float [[X]], ptr [[MINIMUM_RES:%.*]], align 4 ; CHECK-NEXT: store float [[X]], ptr [[MAXIMUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MINIMUMNUM:%.*]] = call float @llvm.minimumnum.f32(float [[X]], float [[X]]) -; CHECK-NEXT: store float [[MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 4 -; CHECK-NEXT: [[MAXIMUMNUM:%.*]] = call float @llvm.maximumnum.f32(float [[X]], float [[X]]) -; CHECK-NEXT: store float [[MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MINIMUMNUM_RES:%.*]], align 4 +; CHECK-NEXT: store float [[X]], ptr [[MAXIMUMNUM_RES:%.*]], align 4 ; CHECK-NEXT: ret void ; %minnum = call float @llvm.minnum.f32(float %x, float %x) @@ -660,11 +703,9 @@ define void @minmax_x_minmax_xy(<2 x float> %x, <2 x float> %y, ptr %minnum_res, ; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call <2 x float> @llvm.maximum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) ; CHECK-NEXT: store <2 x float> [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 8 ; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) -; CHECK-NEXT: [[MINIMUMNUM_NESTED:%.*]] = call <2 x float> @llvm.minimumnum.v2f32(<2 x float> [[X]], <2 x float> [[MINIMUMNUM_XY]]) -; CHECK-NEXT: store <2 x float> [[MINIMUMNUM_NESTED]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[MINIMUMNUM_XY]], ptr [[MINIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> [[Y]]) -; CHECK-NEXT: [[MAXIMUMNUM_NESTED:%.*]] = call <2 x float> @llvm.maximumnum.v2f32(<2 x float> [[X]], <2 x float> [[MAXIMUMNUM_XY]]) -; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM_NESTED]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store <2 x float> [[MAXIMUMNUM_XY]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: ret void ; %minnum_xy = call <2 x float> @llvm.minnum.v2f32(<2 x float> %x, <2 x float> %y) @@ -758,13 +799,9 @@ define void @minmax_minmax_xy_minmax_yx(half %x, half %y, ptr %minnum_res, ptr % ; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call half @llvm.maximum.f16(half [[X]], half [[Y]]) ; CHECK-NEXT: store half [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 2 ; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call half @llvm.minimumnum.f16(half [[X]], half [[Y]]) -; CHECK-NEXT: [[MINIMUMNUM_YX:%.*]] = call half @llvm.minimumnum.f16(half [[Y]], half [[X]]) -; CHECK-NEXT: [[FINAL_MINIMUMNUM:%.*]] = call half @llvm.minimumnum.f16(half [[MINIMUMNUM_XY]], half [[MINIMUMNUM_YX]]) -; CHECK-NEXT: store half [[FINAL_MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[MINIMUMNUM_XY]], ptr [[MINIMUMNUM_RES:%.*]], align 2 ; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call half @llvm.maximumnum.f16(half [[X]], half [[Y]]) -; CHECK-NEXT: [[MAXIMUMNUM_YX:%.*]] = call half @llvm.maximumnum.f16(half [[Y]], half [[X]]) -; CHECK-NEXT: [[FINAL_MAXIMUMNUM:%.*]] = call half @llvm.maximumnum.f16(half [[MAXIMUMNUM_XY]], half [[MAXIMUMNUM_YX]]) -; CHECK-NEXT: store half [[FINAL_MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 +; CHECK-NEXT: store half [[MAXIMUMNUM_XY]], ptr [[MAXIMUMNUM_RES:%.*]], align 2 ; CHECK-NEXT: ret void ; %minnum_xy = call half @llvm.minnum.f16(half %x, 
half %y) @@ -812,13 +849,9 @@ define void @minmax_minmax_xy_maxmin_yx(double %x, double %y, ptr %minnum_res, p ; CHECK-NEXT: [[MAXIMUM_XY:%.*]] = call double @llvm.maximum.f64(double [[Y]], double [[X]]) ; CHECK-NEXT: store double [[MAXIMUM_XY]], ptr [[MAXIMUM_RES:%.*]], align 8 ; CHECK-NEXT: [[MINIMUMNUM_XY:%.*]] = call double @llvm.minimumnum.f64(double [[Y]], double [[X]]) -; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call double @llvm.maximumnum.f64(double [[X]], double [[Y]]) -; CHECK-NEXT: [[FINAL_MINIMUMNUM:%.*]] = call double @llvm.minimumnum.f64(double [[MINIMUMNUM_XY]], double [[MAXIMUMNUM_XY]]) -; CHECK-NEXT: store double [[FINAL_MINIMUMNUM]], ptr [[MINIMUMNUM_RES:%.*]], align 8 -; CHECK-NEXT: [[MAXIMUMNUM_XY1:%.*]] = call double @llvm.maximumnum.f64(double [[Y]], double [[X]]) -; CHECK-NEXT: [[MINIMUMNUM_YX:%.*]] = call double @llvm.minimumnum.f64(double [[X]], double [[Y]]) -; CHECK-NEXT: [[FINAL_MAXIMUMNUM:%.*]] = call double @llvm.maximumnum.f64(double [[MAXIMUMNUM_XY1]], double [[MINIMUMNUM_YX]]) -; CHECK-NEXT: store double [[FINAL_MAXIMUMNUM]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: store double [[MINIMUMNUM_XY]], ptr [[MINIMUMNUM_RES:%.*]], align 8 +; CHECK-NEXT: [[MAXIMUMNUM_XY:%.*]] = call double @llvm.maximumnum.f64(double [[Y]], double [[X]]) +; CHECK-NEXT: store double [[MAXIMUMNUM_XY]], ptr [[MAXIMUMNUM_RES:%.*]], align 8 ; CHECK-NEXT: ret void ; %minnum_xy = call double @llvm.minnum.f64(double %x, double %y) diff --git a/llvm/test/Transforms/InstSimplify/ptrtoint.ll b/llvm/test/Transforms/InstSimplify/ptrtoint.ll index 7346187..3b0e052 100644 --- a/llvm/test/Transforms/InstSimplify/ptrtoint.ll +++ b/llvm/test/Transforms/InstSimplify/ptrtoint.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 ; RUN: opt -S -passes=instsimplify < %s | FileCheck %s +target datalayout = "p1:128:128:128" + define i64 @ptrtoint_gep_sub(ptr %ptr, i64 %end.addr) { ; CHECK-LABEL: define i64 @ptrtoint_gep_sub( ; CHECK-SAME: ptr [[PTR:%.*]], i64 [[END_ADDR:%.*]]) { @@ -136,3 +138,15 @@ define i128 @ptrtoint_gep_sub_wide_type(ptr %ptr, i128 %end.addr) { %end.addr2 = ptrtoint ptr %end to i128 ret i128 %end.addr2 } + +define ptr addrspace(1) @inttoptr_of_ptrtoint_wide(ptr addrspace(1) %ptr) { +; CHECK-LABEL: define ptr addrspace(1) @inttoptr_of_ptrtoint_wide( +; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) { +; CHECK-NEXT: [[INT:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64 +; CHECK-NEXT: [[PTR2:%.*]] = inttoptr i64 [[INT]] to ptr addrspace(1) +; CHECK-NEXT: ret ptr addrspace(1) [[PTR2]] +; + %int = ptrtoint ptr addrspace(1) %ptr to i64 + %ptr2 = inttoptr i64 %int to ptr addrspace(1) + ret ptr addrspace(1) %ptr2 +} diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll b/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll index 3b69527..2e4fc55 100644 --- a/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll +++ b/llvm/test/Transforms/LoopUnroll/AArch64/apple-unrolling.ll @@ -15,7 +15,7 @@ define void @small_load_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale) { ; APPLE-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; APPLE-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 7 ; APPLE-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7 -; APPLE-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; APPLE-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; APPLE: [[ENTRY_NEW]]: ; APPLE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; APPLE-NEXT: br label 
%[[LOOP:.*]] @@ -72,18 +72,18 @@ define void @small_load_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale) { ; APPLE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV_EPIL]], 8 ; APPLE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; APPLE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; APPLE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; APPLE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; APPLE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[LOOP]] ] -; APPLE-NEXT: br label %[[EXIT_UNR_LCSSA]] +; APPLE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] ; APPLE: [[EXIT_UNR_LCSSA]]: -; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[LOOP]] ] ; APPLE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; APPLE: [[LOOP_EPIL_PREHEADER]]: +; APPLE-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; APPLE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; APPLE-NEXT: br label %[[LOOP_EPIL:.*]] ; APPLE: [[LOOP_EPIL]]: -; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL1:%.*]], %[[LOOP_EPIL]] ] +; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL1:%.*]], %[[LOOP_EPIL]] ] ; APPLE-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] ; APPLE-NEXT: [[SCALED_IV_EPIL1:%.*]] = mul nuw nsw i64 [[IV_EPIL1]], [[SCALE]] ; APPLE-NEXT: [[GEP_SRC_EPIL1:%.*]] = getelementptr inbounds float, ptr [[SRC]], i64 [[SCALED_IV_EPIL1]] @@ -106,7 +106,7 @@ define void @small_load_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale) { ; OTHER-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; OTHER-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; OTHER-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; OTHER-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; OTHER-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; OTHER: [[ENTRY_NEW]]: ; OTHER-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; OTHER-NEXT: br label %[[LOOP:.*]] @@ -127,15 +127,15 @@ define void @small_load_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale) { ; OTHER-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; OTHER-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; OTHER-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; OTHER-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; OTHER: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; OTHER-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] -; OTHER-NEXT: br label %[[EXIT_UNR_LCSSA]] +; OTHER-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] ; OTHER: [[EXIT_UNR_LCSSA]]: -; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] ; OTHER-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; OTHER-NEXT: br i1 [[LCMP_MOD]], label 
%[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; OTHER: [[LOOP_EPIL_PREHEADER]]: +; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; OTHER-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; OTHER-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; OTHER-NEXT: br label %[[LOOP_EPIL:.*]] ; OTHER: [[LOOP_EPIL]]: ; OTHER-NEXT: [[SCALED_IV_EPIL:%.*]] = mul nuw nsw i64 [[IV_UNR]], [[SCALE]] @@ -172,7 +172,7 @@ define void @load_op_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale, float %k ; APPLE-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; APPLE-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; APPLE-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; APPLE-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; APPLE-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; APPLE: [[ENTRY_NEW]]: ; APPLE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; APPLE-NEXT: br label %[[LOOP:.*]] @@ -195,15 +195,15 @@ define void @load_op_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale, float %k ; APPLE-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; APPLE-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; APPLE-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; APPLE-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; APPLE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; APPLE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] -; APPLE-NEXT: br label %[[EXIT_UNR_LCSSA]] +; APPLE-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] ; APPLE: [[EXIT_UNR_LCSSA]]: -; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; APPLE-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] ; APPLE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; APPLE: [[LOOP_EPIL_PREHEADER]]: +; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; APPLE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; APPLE-NEXT: br label %[[LOOP_EPIL:.*]] ; APPLE: [[LOOP_EPIL]]: ; APPLE-NEXT: [[SCALED_IV_EPIL:%.*]] = mul nuw nsw i64 [[IV_UNR]], [[SCALE]] @@ -222,7 +222,7 @@ define void @load_op_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale, float %k ; OTHER-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; OTHER-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; OTHER-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; OTHER-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; OTHER-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; OTHER: [[ENTRY_NEW]]: ; OTHER-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; OTHER-NEXT: br label %[[LOOP:.*]] @@ -245,15 +245,15 @@ define void @load_op_store_loop(ptr %src, ptr %dst, i64 %N, i64 %scale, float %k ; OTHER-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; OTHER-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; OTHER-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; OTHER-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; OTHER: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; OTHER-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] -; OTHER-NEXT: br label %[[EXIT_UNR_LCSSA]] +; 
OTHER-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] ; OTHER: [[EXIT_UNR_LCSSA]]: -; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] ; OTHER-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; OTHER: [[LOOP_EPIL_PREHEADER]]: +; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; OTHER-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; OTHER-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; OTHER-NEXT: br label %[[LOOP_EPIL:.*]] ; OTHER: [[LOOP_EPIL]]: ; OTHER-NEXT: [[SCALED_IV_EPIL:%.*]] = mul nuw nsw i64 [[IV_UNR]], [[SCALE]] @@ -375,7 +375,7 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 %N, i32 %x ; APPLE-NEXT: [[TMP1:%.*]] = add i64 [[N]], -2 ; APPLE-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 3 ; APPLE-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 3 -; APPLE-NEXT: br i1 [[TMP2]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; APPLE-NEXT: br i1 [[TMP2]], label %[[LOOP_HEADER_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; APPLE: [[ENTRY_NEW]]: ; APPLE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP0]], [[XTRAITER]] ; APPLE-NEXT: br label %[[LOOP_HEADER:.*]] @@ -439,7 +439,7 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 %N, i32 %x ; APPLE-NEXT: [[GEP_4_1:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_1]], i64 4 ; APPLE-NEXT: [[L_2_1:%.*]] = load i8, ptr [[GEP_4_1]], align 4 ; APPLE-NEXT: [[C_2_1:%.*]] = icmp ugt i8 [[L_2_1]], 7 -; APPLE-NEXT: br i1 [[C_2_1]], label %[[MERGE_11:.*]], label %[[ELSE_1:.*]] +; APPLE-NEXT: br i1 [[C_2_1]], label %[[MERGE_12:.*]], label %[[ELSE_1:.*]] ; APPLE: [[ELSE_1]]: ; APPLE-NEXT: [[CONV_I_1:%.*]] = zext nneg i8 [[L_2_1]] to i64 ; APPLE-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds [9 x i8], ptr @A, i64 0, i64 [[CONV_I_1]] @@ -449,8 +449,8 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 %N, i32 %x ; APPLE-NEXT: [[L_4_1:%.*]] = load i32, ptr [[GEP_B_1]], align 4 ; APPLE-NEXT: [[GEP_C_1:%.*]] = getelementptr inbounds [8 x i32], ptr @C, i64 0, i64 [[IDXPROM_I_1]] ; APPLE-NEXT: [[L_5_1:%.*]] = load i32, ptr [[GEP_C_1]], align 4 -; APPLE-NEXT: br label %[[MERGE_11]] -; APPLE: [[MERGE_11]]: +; APPLE-NEXT: br label %[[MERGE_12]] +; APPLE: [[MERGE_12]]: ; APPLE-NEXT: [[MERGE_1_1:%.*]] = phi i32 [ 0, %[[THEN_1]] ], [ [[L_4_1]], %[[ELSE_1]] ] ; APPLE-NEXT: [[MERGE_2_1:%.*]] = phi i32 [ 0, %[[THEN_1]] ], [ [[L_5_1]], %[[ELSE_1]] ] ; APPLE-NEXT: [[ADD14_1:%.*]] = add nsw i32 [[MERGE_2_1]], [[X]] @@ -488,7 +488,7 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 %N, i32 %x ; APPLE-NEXT: [[GEP_4_2:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_2]], i64 4 ; APPLE-NEXT: [[L_2_2:%.*]] = load i8, ptr [[GEP_4_2]], align 4 ; APPLE-NEXT: [[C_2_2:%.*]] = icmp ugt i8 [[L_2_2]], 7 -; APPLE-NEXT: br i1 [[C_2_2]], label %[[MERGE_22:.*]], label %[[ELSE_2:.*]] +; APPLE-NEXT: br i1 [[C_2_2]], label %[[MERGE_23:.*]], label %[[ELSE_2:.*]] ; APPLE: [[ELSE_2]]: ; APPLE-NEXT: [[CONV_I_2:%.*]] = zext nneg i8 [[L_2_2]] to i64 ; APPLE-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds [9 x i8], ptr @A, i64 0, i64 [[CONV_I_2]] @@ -498,8 +498,8 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 
%N, i32 %x ; APPLE-NEXT: [[L_4_2:%.*]] = load i32, ptr [[GEP_B_2]], align 4 ; APPLE-NEXT: [[GEP_C_2:%.*]] = getelementptr inbounds [8 x i32], ptr @C, i64 0, i64 [[IDXPROM_I_2]] ; APPLE-NEXT: [[L_5_2:%.*]] = load i32, ptr [[GEP_C_2]], align 4 -; APPLE-NEXT: br label %[[MERGE_22]] -; APPLE: [[MERGE_22]]: +; APPLE-NEXT: br label %[[MERGE_23]] +; APPLE: [[MERGE_23]]: ; APPLE-NEXT: [[MERGE_1_2:%.*]] = phi i32 [ 0, %[[THEN_2]] ], [ [[L_4_2]], %[[ELSE_2]] ] ; APPLE-NEXT: [[MERGE_2_2:%.*]] = phi i32 [ 0, %[[THEN_2]] ], [ [[L_5_2]], %[[ELSE_2]] ] ; APPLE-NEXT: [[ADD14_2:%.*]] = add nsw i32 [[MERGE_2_2]], [[X]] @@ -580,18 +580,18 @@ define void @early_continue_dep_on_load_large(ptr %p.1, ptr %p.2, i64 %N, i32 %x ; APPLE-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV_EPIL]], 4 ; APPLE-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 ; APPLE-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; APPLE-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP_HEADER]] -; APPLE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; APPLE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP_LATCH_3]] ] -; APPLE-NEXT: br label %[[EXIT_UNR_LCSSA]] +; APPLE-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP_HEADER]] ; APPLE: [[EXIT_UNR_LCSSA]]: -; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 1, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP_LATCH_3]] ] ; APPLE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_HEADER_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_HEADER_EPIL_PREHEADER]], label %[[EXIT:.*]] ; APPLE: [[LOOP_HEADER_EPIL_PREHEADER]]: +; APPLE-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 1, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; APPLE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; APPLE-NEXT: br label %[[LOOP_HEADER_EPIL:.*]] ; APPLE: [[LOOP_HEADER_EPIL]]: -; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_HEADER_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL1:%.*]], %[[LOOP_LATCH_EPIL:.*]] ] +; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[LOOP_HEADER_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL1:%.*]], %[[LOOP_LATCH_EPIL:.*]] ] ; APPLE-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_HEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_LATCH_EPIL]] ] ; APPLE-NEXT: [[GEP_EPIL1:%.*]] = getelementptr { i32, i8, i8, [2 x i8] }, ptr [[P_1]], i64 [[IV_EPIL1]] ; APPLE-NEXT: [[L_1_EPIL1:%.*]] = load i32, ptr [[GEP_EPIL1]], align 4 @@ -1034,7 +1034,7 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { ; APPLE-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; APPLE-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 3 ; APPLE-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 3 -; APPLE-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; APPLE-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; APPLE: [[ENTRY_NEW]]: ; APPLE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; APPLE-NEXT: br label %[[LOOP:.*]] @@ -1063,26 +1063,25 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { ; APPLE-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV_EPIL]], 4 ; APPLE-NEXT: [[NITER_NEXT_3]] = add nuw i64 [[NITER]], 4 ; APPLE-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; APPLE-NEXT: br i1 [[NITER_NCMP_3]], label 
%[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; APPLE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; APPLE-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] -; APPLE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ] -; APPLE-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; APPLE-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] +; APPLE: [[EXIT_UNR_LCSSA]]: +; APPLE-NEXT: [[RES_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ] +; APPLE-NEXT: [[RDX_UNR:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] ; APPLE-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]] ; APPLE-NEXT: [[BIN_RDX2:%.*]] = add i32 [[RDX_NEXT_2]], [[BIN_RDX]] ; APPLE-NEXT: [[BIN_RDX3:%.*]] = add i32 [[RDX_NEXT_3]], [[BIN_RDX2]] -; APPLE-NEXT: br label %[[EXIT_UNR_LCSSA]] -; APPLE: [[EXIT_UNR_LCSSA]]: -; APPLE-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; APPLE-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] ; APPLE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; APPLE: [[LOOP_EPIL_PREHEADER]]: +; APPLE-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[RDX_EPIL_INIT:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; APPLE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; APPLE-NEXT: br label %[[LOOP_EPIL:.*]] ; APPLE: [[LOOP_EPIL]]: -; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] -; APPLE-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; APPLE-NEXT: [[IV_EPIL1:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; APPLE-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] ; APPLE-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] ; APPLE-NEXT: [[GEP_A_EPIL1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL1]] ; APPLE-NEXT: [[TMP7:%.*]] = load i32, ptr [[GEP_A_EPIL1]], align 2 @@ -1096,7 +1095,7 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { ; APPLE-NEXT: [[RES_PH1:%.*]] = phi i32 [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] ; APPLE-NEXT: br label %[[EXIT]] ; APPLE: [[EXIT]]: -; APPLE-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RES_PH1]], %[[EXIT_EPILOG_LCSSA]] ] +; APPLE-NEXT: [[RES:%.*]] = phi i32 [ [[BIN_RDX3]], %[[EXIT_UNR_LCSSA]] ], [ [[RES_PH1]], %[[EXIT_EPILOG_LCSSA]] ] ; APPLE-NEXT: ret i32 [[RES]] ; ; OTHER-LABEL: define i32 @test_add_reduction_runtime( @@ -1105,7 +1104,7 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { ; OTHER-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; OTHER-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 3 ; OTHER-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 3 -; OTHER-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] 
+; OTHER-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; OTHER: [[ENTRY_NEW]]: ; OTHER-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; OTHER-NEXT: br label %[[LOOP:.*]] @@ -1131,23 +1130,22 @@ define i32 @test_add_reduction_runtime(ptr %a, i64 noundef %n) { ; OTHER-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 ; OTHER-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 ; OTHER-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; OTHER-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]] -; OTHER: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; OTHER-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] -; OTHER-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ] -; OTHER-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] -; OTHER-NEXT: br label %[[EXIT_UNR_LCSSA]] +; OTHER-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]] ; OTHER: [[EXIT_UNR_LCSSA]]: -; OTHER-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; OTHER-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; OTHER-NEXT: [[RES_PH:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[LOOP]] ] +; OTHER-NEXT: [[RDX_UNR:%.*]] = phi i32 [ [[RDX_NEXT_3]], %[[LOOP]] ] ; OTHER-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; OTHER-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; OTHER: [[LOOP_EPIL_PREHEADER]]: +; OTHER-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; OTHER-NEXT: [[RDX_EPIL_INIT:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR]], %[[EXIT_UNR_LCSSA]] ] +; OTHER-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; OTHER-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; OTHER-NEXT: br label %[[LOOP_EPIL:.*]] ; OTHER: [[LOOP_EPIL]]: -; OTHER-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] -; OTHER-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; OTHER-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] ; OTHER-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] ; OTHER-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL]] ; OTHER-NEXT: [[TMP6:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll b/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll index b8215d9..66c55f2 100644 --- a/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll +++ b/llvm/test/Transforms/LoopUnroll/AArch64/runtime-unroll-generic.ll @@ -69,12 +69,14 @@ define void @runtime_unroll_generic(i32 %arg_0, ptr %arg_1, ptr %arg_2, ptr %arg ; CHECK-A55-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV]], 4 ; CHECK-A55-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 ; 
CHECK-A55-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-A55-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY6]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-A55-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA1:%.*]], label [[FOR_BODY6]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-A55: for.end.loopexit.unr-lcssa: -; CHECK-A55-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER]] ], [ [[INDVARS_IV_NEXT_3]], [[FOR_BODY6]] ] ; CHECK-A55-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-A55-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_END]], label [[FOR_BODY6_EPIL:%.*]] -; CHECK-A55: for.body6.epil: +; CHECK-A55-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_END]], label [[FOR_END_LOOPEXIT_UNR_LCSSA]] +; CHECK-A55: for.body6.epil.preheader: +; CHECK-A55-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY6_PREHEADER]] ], [ [[INDVARS_IV_NEXT_3]], [[FOR_END_LOOPEXIT_UNR_LCSSA1]] ] +; CHECK-A55-NEXT: [[LCMP_MOD5:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-A55-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD5]]) ; CHECK-A55-NEXT: [[ARRAYIDX10_EPIL:%.*]] = getelementptr inbounds nuw i16, ptr [[ARG_2]], i64 [[INDVARS_IV_UNR]] ; CHECK-A55-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX10_EPIL]], align 2 ; CHECK-A55-NEXT: [[CONV_EPIL:%.*]] = sext i16 [[TMP13]] to i32 diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll index 38d559f..2bafa08 100644 --- a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll +++ b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll @@ -9,7 +9,7 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; APPLE-NEXT: [[TMP0:%.*]] = add i64 [[LEN]], -1 ; APPLE-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 7 ; APPLE-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7 -; APPLE-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; APPLE-NEXT: br i1 [[TMP1]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; APPLE: [[ENTRY_NEW]]: ; APPLE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]] ; APPLE-NEXT: br label %[[FOR_BODY:.*]] @@ -66,18 +66,18 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; APPLE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8 ; APPLE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; APPLE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; APPLE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]] -; APPLE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; APPLE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ] -; APPLE-NEXT: br label %[[EXIT_UNR_LCSSA]] +; APPLE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA:.*]], label %[[FOR_BODY]] ; APPLE: [[EXIT_UNR_LCSSA]]: -; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; APPLE-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ] ; APPLE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; APPLE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER]], label %[[EXIT:.*]] ; APPLE: [[FOR_BODY_EPIL_PREHEADER]]: +; APPLE-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; APPLE-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; APPLE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; APPLE-NEXT: br label %[[FOR_BODY_EPIL:.*]] ; APPLE: 
[[FOR_BODY_EPIL]]: -; APPLE-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_UNR]], %[[FOR_BODY_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[FOR_BODY_EPIL]] ] +; APPLE-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[FOR_BODY_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[FOR_BODY_EPIL]] ] ; APPLE-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, %[[FOR_BODY_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[FOR_BODY_EPIL]] ] ; APPLE-NEXT: [[TMP18:%.*]] = sub nsw i64 [[LEN]], [[IV_EPIL]] ; APPLE-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP18]] @@ -100,7 +100,7 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; CORTEXA55-NEXT: [[TMP0:%.*]] = add i64 [[LEN]], -1 ; CORTEXA55-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 3 ; CORTEXA55-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 3 -; CORTEXA55-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CORTEXA55-NEXT: br i1 [[TMP1]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CORTEXA55: [[ENTRY_NEW]]: ; CORTEXA55-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]] ; CORTEXA55-NEXT: br label %[[FOR_BODY:.*]] @@ -133,15 +133,15 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; CORTEXA55-NEXT: [[IV_NEXT_3]] = add nuw nsw i64 [[IV]], 4 ; CORTEXA55-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 ; CORTEXA55-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CORTEXA55-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]] -; CORTEXA55: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; CORTEXA55-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[FOR_BODY]] ] -; CORTEXA55-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CORTEXA55-NEXT: br i1 [[NITER_NCMP_3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[FOR_BODY]] ; CORTEXA55: [[EXIT_UNR_LCSSA]]: -; CORTEXA55-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CORTEXA55-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_3]], %[[FOR_BODY]] ] ; CORTEXA55-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CORTEXA55-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CORTEXA55-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER]], label %[[EXIT:.*]] ; CORTEXA55: [[FOR_BODY_EPIL_PREHEADER]]: +; CORTEXA55-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; CORTEXA55-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CORTEXA55-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CORTEXA55-NEXT: br label %[[FOR_BODY_EPIL:.*]] ; CORTEXA55: [[FOR_BODY_EPIL]]: ; CORTEXA55-NEXT: [[TMP10:%.*]] = sub nsw i64 [[LEN]], [[IV_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll index 2486b80..adf1e21 100644 --- a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll +++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll @@ -14,7 +14,7 @@ define amdgpu_kernel void @unroll_when_cascaded_gep(i32 %arg) { ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[ARG:%.*]], 1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP0]], 7 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[ARG]], 7 -; CHECK-NEXT: br i1 [[TMP1]], label [[BB2_UNR_LCSSA:%.*]], label [[BB_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[BB1_EPIL_PREHEADER:%.*]], label [[BB_NEW:%.*]] ; CHECK: bb.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[TMP0]], [[XTRAITER]] ; CHECK-NEXT: br label [[BB1:%.*]] @@ -24,18 +24,18 @@ define amdgpu_kernel void 
@unroll_when_cascaded_gep(i32 %arg) { ; CHECK-NEXT: [[ADD_7]] = add i32 [[PHI]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i32 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i32 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[BB2_UNR_LCSSA_LOOPEXIT:%.*]], label [[BB1]] -; CHECK: bb2.unr-lcssa.loopexit: -; CHECK-NEXT: [[PHI_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[BB1]] ] -; CHECK-NEXT: br label [[BB2_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[BB2_UNR_LCSSA:%.*]], label [[BB1]] ; CHECK: bb2.unr-lcssa: -; CHECK-NEXT: [[PHI_UNR:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ [[PHI_UNR_PH]], [[BB2_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_UNR:%.*]] = phi i32 [ [[ADD_7]], [[BB1]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[BB1_EPIL_PREHEADER:%.*]], label [[BB2:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[BB1_EPIL_PREHEADER]], label [[BB2:%.*]] ; CHECK: bb1.epil.preheader: +; CHECK-NEXT: [[PHI_EPIL_INIT:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ [[PHI_UNR]], [[BB2_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[BB1_EPIL:%.*]] ; CHECK: bb1.epil: -; CHECK-NEXT: [[PHI_EPIL:%.*]] = phi i32 [ [[PHI_UNR]], [[BB1_EPIL_PREHEADER]] ], [ [[ADD_EPIL:%.*]], [[BB1_EPIL]] ] +; CHECK-NEXT: [[PHI_EPIL:%.*]] = phi i32 [ [[PHI_EPIL_INIT]], [[BB1_EPIL_PREHEADER]] ], [ [[ADD_EPIL:%.*]], [[BB1_EPIL]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i32 [ 0, [[BB1_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[BB1_EPIL]] ] ; CHECK-NEXT: [[GETELEMENTPTR_EPIL:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @global, i32 8), i32 0, i32 0 ; CHECK-NEXT: [[ADD_EPIL]] = add i32 [[PHI_EPIL]], 1 diff --git a/llvm/test/Transforms/LoopUnroll/ARM/multi-blocks.ll b/llvm/test/Transforms/LoopUnroll/ARM/multi-blocks.ll index d2911a1..7dacbf6 100644 --- a/llvm/test/Transforms/LoopUnroll/ARM/multi-blocks.ll +++ b/llvm/test/Transforms/LoopUnroll/ARM/multi-blocks.ll @@ -11,22 +11,21 @@ define void @test_three_blocks(ptr nocapture %Output, ptr nocapture readonly %Co ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[MAXJ]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[MAXJ]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] ; CHECK: for.body.preheader.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[MAXJ]], [[XTRAITER]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup.loopexit.unr-lcssa.loopexit: +; CHECK: for.cond.cleanup.loopexit.unr-lcssa: ; CHECK-NEXT: [[TEMP_1_LCSSA_PH_PH:%.*]] = phi i32 [ [[TEMP_1_3:%.*]], [[FOR_INC_3:%.*]] ] ; CHECK-NEXT: [[J_010_UNR_PH:%.*]] = phi i32 [ [[INC_3:%.*]], [[FOR_INC_3]] ] ; CHECK-NEXT: [[TEMP_09_UNR_PH:%.*]] = phi i32 [ [[TEMP_1_3]], [[FOR_INC_3]] ] -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] -; CHECK: for.cond.cleanup.loopexit.unr-lcssa: -; CHECK-NEXT: [[TEMP_1_LCSSA_PH:%.*]] = phi i32 [ poison, [[FOR_BODY_PREHEADER]] ], [ [[TEMP_1_LCSSA_PH_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]] ] -; CHECK-NEXT: [[J_010_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[J_010_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[TEMP_09_UNR:%.*]] = phi i32 [ 
0, [[FOR_BODY_PREHEADER]] ], [ [[TEMP_09_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD1]], label [[FOR_BODY_EPIL_PREHEADER]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ; CHECK: for.body.epil.preheader: +; CHECK-NEXT: [[J_010_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[J_010_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]] ] +; CHECK-NEXT: [[TEMP_09_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[TEMP_09_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD]]) ; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]] ; CHECK: for.body.epil: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[CONDITION:%.*]], i32 [[J_010_UNR]] @@ -75,7 +74,7 @@ define void @test_three_blocks(ptr nocapture %Output, ptr nocapture readonly %Co ; CHECK-NEXT: [[TEMP_1_LCSSA_PH1:%.*]] = phi i32 [ [[TEMP_1_EPIL]], [[FOR_INC_EPIL]] ], [ [[TEMP_1_EPIL_1]], [[FOR_INC_EPIL_1]] ], [ [[TEMP_1_EPIL_2]], [[FOR_INC_EPIL_2]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[TEMP_1_LCSSA:%.*]] = phi i32 [ [[TEMP_1_LCSSA_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[TEMP_1_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[TEMP_1_LCSSA:%.*]] = phi i32 [ [[TEMP_1_LCSSA_PH_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[TEMP_1_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: [[TEMP_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TEMP_1_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] @@ -135,7 +134,7 @@ define void @test_three_blocks(ptr nocapture %Output, ptr nocapture readonly %Co ; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_010]], 4 ; CHECK-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER]], 4 ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY]] ; entry: %cmp8 = icmp eq i32 %MaxJ, 0 @@ -354,24 +353,23 @@ define void @test_four_blocks(ptr nocapture %Output, ptr nocapture readonly %Con ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[MAXJ]], -2 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP0]], 3 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 3 -; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_LR_PH_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_NEW:%.*]] ; CHECK: for.body.lr.ph.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[TMP0]], [[XTRAITER]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup.loopexit.unr-lcssa.loopexit: +; CHECK: for.cond.cleanup.loopexit.unr-lcssa: ; CHECK-NEXT: [[TEMP_1_LCSSA_PH_PH:%.*]] = phi i32 [ [[TEMP_1_3:%.*]], [[FOR_INC_3:%.*]] ] ; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[I2_3:%.*]], [[FOR_INC_3]] ] ; CHECK-NEXT: [[J_027_UNR_PH:%.*]] = phi i32 [ [[INC_3:%.*]], [[FOR_INC_3]] ] ; CHECK-NEXT: [[TEMP_026_UNR_PH:%.*]] = phi i32 [ [[TEMP_1_3]], 
[[FOR_INC_3]] ] -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] -; CHECK: for.cond.cleanup.loopexit.unr-lcssa: -; CHECK-NEXT: [[TEMP_1_LCSSA_PH:%.*]] = phi i32 [ poison, [[FOR_BODY_LR_PH]] ], [ [[TEMP_1_LCSSA_PH_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]] ] -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[I_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[J_027_UNR:%.*]] = phi i32 [ 1, [[FOR_BODY_LR_PH]] ], [ [[J_027_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[TEMP_026_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[TEMP_026_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: br i1 [[LCMP_MOD1]], label [[FOR_BODY_EPIL_PREHEADER]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ; CHECK: for.body.epil.preheader: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[I_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]] ] +; CHECK-NEXT: [[J_027_UNR:%.*]] = phi i32 [ 1, [[FOR_BODY_LR_PH]] ], [ [[J_027_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[TEMP_026_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[TEMP_026_UNR_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD]]) ; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]] ; CHECK: for.body.epil: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[CONDITION:%.*]], i32 [[J_027_UNR]] @@ -450,7 +448,7 @@ define void @test_four_blocks(ptr nocapture %Output, ptr nocapture readonly %Con ; CHECK-NEXT: [[TEMP_1_LCSSA_PH1:%.*]] = phi i32 [ [[TEMP_1_EPIL]], [[FOR_INC_EPIL]] ], [ [[TEMP_1_EPIL_1]], [[FOR_INC_EPIL_1]] ], [ [[TEMP_1_EPIL_2]], [[FOR_INC_EPIL_2]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[TEMP_1_LCSSA:%.*]] = phi i32 [ [[TEMP_1_LCSSA_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[TEMP_1_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[TEMP_1_LCSSA:%.*]] = phi i32 [ [[TEMP_1_LCSSA_PH_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[TEMP_1_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: [[TEMP_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[TEMP_1_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] @@ -551,7 +549,7 @@ define void @test_four_blocks(ptr nocapture %Output, ptr nocapture readonly %Con ; CHECK-NEXT: [[INC_3]] = add nuw i32 [[J_027]], 4 ; CHECK-NEXT: [[NITER_NEXT_3]] = add i32 [[NITER]], 4 ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY]] ; entry: %cmp25 = icmp ugt i32 %MaxJ, 1 diff --git a/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll index f74fb14..8edc133 100644 --- a/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll +++ 
b/llvm/test/Transforms/LoopUnroll/Hexagon/reuse-lcssa-phi-scev-expansion.ll @@ -29,7 +29,7 @@ define void @preserve_lcssa_when_reusing_existing_phi() { ; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP1]], 7 ; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP2]], 7 -; CHECK-NEXT: br i1 [[TMP3]], label %[[LOOP_1_LATCH_UNR_LCSSA:.*]], label %[[LOOP_4_PREHEADER_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP3]], label %[[LOOP_4_EPIL_PREHEADER:.*]], label %[[LOOP_4_PREHEADER_NEW:.*]] ; CHECK: [[LOOP_4_PREHEADER_NEW]]: ; CHECK-NEXT: br label %[[LOOP_4:.*]] ; CHECK: [[LOOP_2_LATCH]]: @@ -47,18 +47,18 @@ define void @preserve_lcssa_when_reusing_existing_phi() { ; CHECK-NEXT: call void @foo() ; CHECK-NEXT: [[INC_I_7]] = add nuw nsw i32 [[IV_4]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add nuw nsw i32 [[NITER]], 8 -; CHECK-NEXT: br i1 true, label %[[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP_4]] -; CHECK: [[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[IV_4_UNR_PH:%.*]] = phi i32 [ [[INC_I_7]], %[[LOOP_4]] ] -; CHECK-NEXT: br label %[[LOOP_1_LATCH_UNR_LCSSA]] +; CHECK-NEXT: br i1 true, label %[[LOOP_1_LATCH_UNR_LCSSA:.*]], label %[[LOOP_4]] ; CHECK: [[LOOP_1_LATCH_UNR_LCSSA]]: -; CHECK-NEXT: [[IV_4_UNR:%.*]] = phi i32 [ 0, %[[LOOP_4_PREHEADER]] ], [ [[IV_4_UNR_PH]], %[[LOOP_1_LATCH_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_4_UNR:%.*]] = phi i32 [ [[INC_I_7]], %[[LOOP_4]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_4_EPIL_PREHEADER:.*]], label %[[LOOP_1_LATCH:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_4_EPIL_PREHEADER]], label %[[LOOP_1_LATCH:.*]] ; CHECK: [[LOOP_4_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_4_EPIL_INIT:%.*]] = phi i32 [ 0, %[[LOOP_4_PREHEADER]] ], [ [[IV_4_UNR]], %[[LOOP_1_LATCH_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label %[[LOOP_4_EPIL:.*]] ; CHECK: [[LOOP_4_EPIL]]: -; CHECK-NEXT: [[IV_4_EPIL:%.*]] = phi i32 [ [[INC_I_EPIL:%.*]], %[[LOOP_4_EPIL]] ], [ [[IV_4_UNR]], %[[LOOP_4_EPIL_PREHEADER]] ] +; CHECK-NEXT: [[IV_4_EPIL:%.*]] = phi i32 [ [[INC_I_EPIL:%.*]], %[[LOOP_4_EPIL]] ], [ [[IV_4_EPIL_INIT]], %[[LOOP_4_EPIL_PREHEADER]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i32 [ 0, %[[LOOP_4_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_4_EPIL]] ] ; CHECK-NEXT: call void @foo() ; CHECK-NEXT: [[INC_I_EPIL]] = add i32 [[IV_4_EPIL]], 1 diff --git a/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors-inseltpoison.ll b/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors-inseltpoison.ll index 456875e..5d08e9d 100644 --- a/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors-inseltpoison.ll +++ b/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors-inseltpoison.ll @@ -51,16 +51,16 @@ define ptr @f(ptr returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_add ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] ; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT:%.*]], label [[VECTOR_BODY]] -; CHECK: middle.block.unr-lcssa.loopexit: +; CHECK: middle.block.unr-lcssa: ; CHECK-NEXT: [[INDEX_UNR_PH:%.*]] = phi i64 [ [[INDEX_NEXT_1]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND12_UNR_PH:%.*]] = phi <16 x i32> [ [[VEC_IND_NEXT13_1]], [[VECTOR_BODY]] ] -; CHECK-NEXT: br label 
[[MIDDLE_BLOCK_UNR_LCSSA]] -; CHECK: middle.block.unr-lcssa: -; CHECK-NEXT: [[INDEX_UNR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[VEC_IND12_UNR:%.*]] = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, [[VECTOR_PH]] ], [ [[VEC_IND12_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[VECTOR_BODY_EPIL_PREHEADER:%.*]], label [[MIDDLE_BLOCK:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[MIDDLE_BLOCK_UNR_LCSSA]], label [[MIDDLE_BLOCK:%.*]] ; CHECK: vector.body.epil.preheader: +; CHECK-NEXT: [[INDEX_UNR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[VEC_IND12_UNR:%.*]] = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, [[VECTOR_PH]] ], [ [[VEC_IND12_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[VECTOR_BODY_EPIL:%.*]] ; CHECK: vector.body.epil: ; CHECK-NEXT: [[TMP14:%.*]] = shl <16 x i32> splat (i32 1), [[VEC_IND12_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors.ll b/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors.ll index cd4198f..03277fc 100644 --- a/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors.ll +++ b/llvm/test/Transforms/LoopUnroll/PowerPC/p8-unrolling-legalize-vectors.ll @@ -51,16 +51,16 @@ define ptr @f(ptr returned %s, i32 zeroext %x, i32 signext %k) local_unnamed_add ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] ; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT:%.*]], label [[VECTOR_BODY]] -; CHECK: middle.block.unr-lcssa.loopexit: +; CHECK: middle.block.unr-lcssa: ; CHECK-NEXT: [[INDEX_UNR_PH:%.*]] = phi i64 [ [[INDEX_NEXT_1]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND12_UNR_PH:%.*]] = phi <16 x i32> [ [[VEC_IND_NEXT13_1]], [[VECTOR_BODY]] ] -; CHECK-NEXT: br label [[MIDDLE_BLOCK_UNR_LCSSA]] -; CHECK: middle.block.unr-lcssa: -; CHECK-NEXT: [[INDEX_UNR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[VEC_IND12_UNR:%.*]] = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, [[VECTOR_PH]] ], [ [[VEC_IND12_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[VECTOR_BODY_EPIL_PREHEADER:%.*]], label [[MIDDLE_BLOCK:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[MIDDLE_BLOCK_UNR_LCSSA]], label [[MIDDLE_BLOCK:%.*]] ; CHECK: vector.body.epil.preheader: +; CHECK-NEXT: [[INDEX_UNR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[VEC_IND12_UNR:%.*]] = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, [[VECTOR_PH]] ], [ [[VEC_IND12_UNR_PH]], [[MIDDLE_BLOCK_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; 
CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[VECTOR_BODY_EPIL:%.*]] ; CHECK: vector.body.epil: ; CHECK-NEXT: [[TMP14:%.*]] = shl <16 x i32> splat (i32 1), [[VEC_IND12_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll index 811d055..b575057 100644 --- a/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll +++ b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll @@ -26,7 +26,7 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; SIFIVE-NEXT: [[TMP2:%.*]] = add i64 [[LEN]], -1 ; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 7 ; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7 -; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; SIFIVE-NEXT: br i1 [[TMP3]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; SIFIVE: [[ENTRY_NEW]]: ; SIFIVE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]] ; SIFIVE-NEXT: br label %[[FOR_BODY:.*]] @@ -83,15 +83,15 @@ define void @reverse(ptr %dst, ptr %src, i64 %len) { ; SIFIVE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8 ; SIFIVE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; SIFIVE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; SIFIVE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]] -; SIFIVE: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; SIFIVE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ] -; SIFIVE-NEXT: br label %[[EXIT_UNR_LCSSA]] +; SIFIVE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA:.*]], label %[[FOR_BODY]] ; SIFIVE: [[EXIT_UNR_LCSSA]]: -; SIFIVE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; SIFIVE-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ] ; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER]], label %[[EXIT:.*]] ; SIFIVE: [[FOR_BODY_EPIL_PREHEADER]]: +; SIFIVE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; SIFIVE-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; SIFIVE-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; SIFIVE-NEXT: br label %[[FOR_BODY_EPIL:.*]] ; SIFIVE: [[FOR_BODY_EPIL]]: ; SIFIVE-NEXT: [[TMP18:%.*]] = sub nsw i64 [[LEN]], [[IV_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll b/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll index ea499e5..b456ad8 100644 --- a/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll +++ b/llvm/test/Transforms/LoopUnroll/WebAssembly/basic-unrolling.ll @@ -124,15 +124,17 @@ define hidden void @runtime(ptr nocapture %a, ptr nocapture readonly %b, ptr noc ; CHECK: for.body.preheader: ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[N]], 1 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[N]], 1 -; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] ; CHECK: for.body.preheader.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i32 [[N]], -2 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup.loopexit.unr-lcssa: -; CHECK-NEXT: [[I_09_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = 
icmp eq i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_EPIL:%.*]] -; CHECK: for.body.epil: +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_EPIL_PREHEADER]] +; CHECK: for.body.epil.preheader: +; CHECK-NEXT: [[I_09_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INC_1:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[I_09_UNR]] ; CHECK-NEXT: [[I_EPIL:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4 ; CHECK-NEXT: [[ARRAYIDX1_EPIL:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i32 [[I_09_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/convergent.controlled.ll b/llvm/test/Transforms/LoopUnroll/convergent.controlled.ll index 7fd4eb1..6e600d2 100644 --- a/llvm/test/Transforms/LoopUnroll/convergent.controlled.ll +++ b/llvm/test/Transforms/LoopUnroll/convergent.controlled.ll @@ -302,7 +302,7 @@ define i32 @pragma_unroll_with_remainder(i32 %n) { ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP0]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 1 -; CHECK-NEXT: br i1 [[TMP2]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP2]], label [[L3_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[TMP0]], [[XTRAITER]] ; CHECK-NEXT: br label [[L3:%.*]], !llvm.loop [[LOOP4]] @@ -316,13 +316,13 @@ define i32 @pragma_unroll_with_remainder(i32 %n) { ; CHECK-NEXT: [[INC_1]] = add nsw i32 [[X_0]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i32 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[L3]], !llvm.loop [[LOOP8:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[L3]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit.unr-lcssa: ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[L3_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[L3_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: l3.epil.preheader: +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[L3_EPIL:%.*]] ; CHECK: l3.epil: ; CHECK-NEXT: [[TOK_LOOP_EPIL:%.*]] = call token @llvm.experimental.convergence.anchor() diff --git a/llvm/test/Transforms/LoopUnroll/followup.ll b/llvm/test/Transforms/LoopUnroll/followup.ll index e4ae7b6..051e43d 100644 --- a/llvm/test/Transforms/LoopUnroll/followup.ll +++ b/llvm/test/Transforms/LoopUnroll/followup.ll @@ -43,7 +43,7 @@ for.end: ; preds = %for.body, %entry ; COUNT: ![[LOOP]] = distinct !{![[LOOP]], ![[FOLLOWUP_ALL]], ![[FOLLOWUP_UNROLLED]]} -; EPILOG: br i1 %niter.ncmp.7, label %for.end.loopexit.unr-lcssa.loopexit, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]] +; EPILOG: br i1 %niter.ncmp.7, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]] ; EPILOG: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]] ; EPILOG: ![[LOOP_0]] = distinct !{![[LOOP_0]], 
![[FOLLOWUP_ALL:[0-9]+]], ![[FOLLOWUP_UNROLLED:[0-9]+]]} diff --git a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll index 835fc2f..ee28aa1 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll @@ -3,9 +3,7 @@ ; Test that epilogue is tagged with the same debug information as original loop body rather than original loop exit. ; CHECK: for.body.i: -; CHECK: br i1 {{.*}}, label %lee1.exit.loopexit.unr-lcssa.loopexit, label %for.body.i, !dbg ![[LOOP_LOC:[0-9]+]] -; CHECK: lee1.exit.loopexit.unr-lcssa.loopexit: -; CHECK: br label %lee1.exit.loopexit.unr-lcssa, !dbg ![[LOOP_LOC]] +; CHECK: br i1 {{.*}}, label %lee1.exit.loopexit.unr-lcssa, label %for.body.i, !dbg ![[LOOP_LOC:[0-9]+]] ; CHECK: lee1.exit.loopexit.unr-lcssa: ; CHECK: %lcmp.mod = icmp ne i32 %xtraiter, 0, !dbg ![[LOOP_LOC]] ; CHECK: br i1 %lcmp.mod, label %for.body.i.epil.preheader, label %lee1.exit.loopexit, !dbg ![[LOOP_LOC]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-exit-phi-scev-invalidation.ll b/llvm/test/Transforms/LoopUnroll/runtime-exit-phi-scev-invalidation.ll index a97b394..0c52b5a0 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-exit-phi-scev-invalidation.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-exit-phi-scev-invalidation.ll @@ -20,7 +20,7 @@ define void @pr56282() { ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP1]], 7 ; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7 -; CHECK-NEXT: br i1 [[TMP3]], label [[OUTER_MIDDLE_UNR_LCSSA:%.*]], label [[OUTER_HEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP3]], label [[INNER_1_HEADER_EPIL_PREHEADER:%.*]], label [[OUTER_HEADER_NEW:%.*]] ; CHECK: outer.header.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP1]], [[XTRAITER]] ; CHECK-NEXT: br label [[INNER_1_HEADER:%.*]] @@ -62,17 +62,16 @@ define void @pr56282() { ; CHECK: inner.1.latch.7: ; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp ne i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[INNER_1_HEADER]], label [[OUTER_MIDDLE_UNR_LCSSA_LOOPEXIT:%.*]] -; CHECK: outer.middle.unr-lcssa.loopexit: +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[INNER_1_HEADER]], label [[OUTER_MIDDLE_UNR_LCSSA:%.*]] +; CHECK: outer.middle.unr-lcssa: ; CHECK-NEXT: [[V_LCSSA1_PH_PH:%.*]] = phi i32 [ [[V_7]], [[INNER_1_LATCH_7]] ] ; CHECK-NEXT: [[INNER_1_IV_UNR_PH:%.*]] = phi i64 [ [[INNER_1_IV_NEXT_7]], [[INNER_1_LATCH_7]] ] -; CHECK-NEXT: br label [[OUTER_MIDDLE_UNR_LCSSA]] -; CHECK: outer.middle.unr-lcssa: -; CHECK-NEXT: [[V_LCSSA1_PH:%.*]] = phi i32 [ poison, [[OUTER_HEADER]] ], [ [[V_LCSSA1_PH_PH]], [[OUTER_MIDDLE_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[INNER_1_IV_UNR:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[INNER_1_IV_UNR_PH]], [[OUTER_MIDDLE_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[INNER_1_HEADER_EPIL_PREHEADER:%.*]], label [[OUTER_MIDDLE:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[INNER_1_HEADER_EPIL_PREHEADER]], label [[OUTER_MIDDLE:%.*]] ; CHECK: inner.1.header.epil.preheader: +; CHECK-NEXT: [[INNER_1_IV_UNR:%.*]] = phi i64 [ 0, [[OUTER_HEADER]] ], [ [[INNER_1_IV_UNR_PH]], [[OUTER_MIDDLE_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD3:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD3]]) ; CHECK-NEXT: br label 
[[INNER_1_HEADER_EPIL:%.*]] ; CHECK: inner.1.header.epil: ; CHECK-NEXT: [[INNER_1_IV_EPIL:%.*]] = phi i64 [ [[INNER_1_IV_UNR]], [[INNER_1_HEADER_EPIL_PREHEADER]] ], [ [[INNER_1_IV_NEXT_EPIL:%.*]], [[INNER_1_LATCH_EPIL:%.*]] ] @@ -90,7 +89,7 @@ define void @pr56282() { ; CHECK-NEXT: [[V_LCSSA1_PH2:%.*]] = phi i32 [ [[V_EPIL]], [[INNER_1_LATCH_EPIL]] ] ; CHECK-NEXT: br label [[OUTER_MIDDLE]] ; CHECK: outer.middle: -; CHECK-NEXT: [[V_LCSSA1:%.*]] = phi i32 [ [[V_LCSSA1_PH]], [[OUTER_MIDDLE_UNR_LCSSA]] ], [ [[V_LCSSA1_PH2]], [[OUTER_MIDDLE_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[V_LCSSA1:%.*]] = phi i32 [ [[V_LCSSA1_PH_PH]], [[OUTER_MIDDLE_UNR_LCSSA]] ], [ [[V_LCSSA1_PH2]], [[OUTER_MIDDLE_EPILOG_LCSSA]] ] ; CHECK-NEXT: [[C_3:%.*]] = icmp ugt i32 [[V_LCSSA1]], 0 ; CHECK-NEXT: br i1 [[C_3]], label [[INNER_2_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK: inner.2.preheader: @@ -102,7 +101,7 @@ define void @pr56282() { ; CHECK-NEXT: ret void ; CHECK: exit.deopt.loopexit: ; CHECK-NEXT: br label [[EXIT_DEOPT:%.*]] -; CHECK: exit.deopt.loopexit3: +; CHECK: exit.deopt.loopexit4: ; CHECK-NEXT: br label [[EXIT_DEOPT]] ; CHECK: exit.deopt: ; CHECK-NEXT: call void (...) @llvm.experimental.deoptimize.isVoid(i32 0) [ "deopt"() ] @@ -233,7 +232,7 @@ define void @pr56286(i64 %x, ptr %src, ptr %dst, ptr %ptr.src) !prof !0 { ; CHECK-NEXT: store i32 [[L_1_7]], ptr [[DST]], align 8 ; CHECK-NEXT: [[INNER_1_IV_NEXT_7]] = add i64 [[INNER_1_IV]], 8 ; CHECK-NEXT: [[CMP_2_7:%.*]] = icmp sgt i64 [[INNER_1_IV_NEXT_6]], 0 -; CHECK-NEXT: br i1 [[CMP_2_7]], label [[OUTER_MIDDLE_UNR_LCSSA:%.*]], label [[INNER_1_HEADER]], !prof [[PROF6:![0-9]+]] +; CHECK-NEXT: br i1 [[CMP_2_7]], label [[OUTER_MIDDLE_UNR_LCSSA:%.*]], label [[INNER_1_HEADER]], !prof [[PROF6:![0-9]+]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: outer.middle.unr-lcssa: ; CHECK-NEXT: [[L_1_LCSSA_PH:%.*]] = phi i32 [ [[L_1_7]], [[INNER_1_LATCH_7]] ] ; CHECK-NEXT: br label [[OUTER_MIDDLE]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-i128.ll b/llvm/test/Transforms/LoopUnroll/runtime-i128.ll index 4cd8e7c..fec8626 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-i128.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-i128.ll @@ -11,7 +11,7 @@ define void @test(i128 %n, i128 %m) { ; CHECK-NEXT: [[TMP1:%.*]] = add i128 [[TMP0]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i128 [[TMP0]], 7 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i128 [[TMP1]], 7 -; CHECK-NEXT: br i1 [[TMP2]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP2]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i128 [[TMP0]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -29,18 +29,18 @@ define void @test(i128 %n, i128 %m) { ; CHECK-NEXT: [[IV_NEXT_7]] = add i128 [[IV]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i128 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp ne i128 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LOOP]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i128 [ [[IV_NEXT_7]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LOOP]], label [[EXIT_UNR_LCSSA:%.*]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i128 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i128 [ [[IV_NEXT_7]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i128 [[XTRAITER]], 0 -; CHECK-NEXT: br 
i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i128 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i128 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: -; CHECK-NEXT: [[IV_EPIL:%.*]] = phi i128 [ [[IV_UNR]], [[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], [[LOOP_EPIL]] ] +; CHECK-NEXT: [[IV_EPIL:%.*]] = phi i128 [ [[IV_EPIL_INIT]], [[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], [[LOOP_EPIL]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i128 [ 0, [[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[LOOP_EPIL]] ] ; CHECK-NEXT: call void @foo() ; CHECK-NEXT: [[IV_NEXT_EPIL]] = add i128 [[IV_EPIL]], 1 diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop-at-most-two-exits.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop-at-most-two-exits.ll index 8472a8c..85de29d 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop-at-most-two-exits.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop-at-most-two-exits.ll @@ -9,7 +9,7 @@ define i32 @test(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; ENABLED-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; ENABLED-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; ENABLED-NEXT: br i1 [[TMP2]], label [[FOR_END_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; ENABLED-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; ENABLED: entry.new: ; ENABLED-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP0]], [[XTRAITER]] ; ENABLED-NEXT: br label [[HEADER:%.*]] @@ -71,23 +71,22 @@ define i32 @test(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; ENABLED-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[FOR_END_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; ENABLED: for.end.unr-lcssa.loopexit: +; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[FOR_END_UNR_LCSSA:%.*]], label [[HEADER]] +; ENABLED: for.end.unr-lcssa: ; ENABLED-NEXT: [[SUM_0_LCSSA_PH_PH:%.*]] = phi i32 [ [[ADD_7]], [[FOR_BODY_7]] ] ; ENABLED-NEXT: [[INDVARS_IV_UNR_PH:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_7]], [[FOR_BODY_7]] ] ; ENABLED-NEXT: [[SUM_02_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[FOR_BODY_7]] ] -; ENABLED-NEXT: br label [[FOR_END_UNR_LCSSA]] -; ENABLED: for.end.unr-lcssa: -; ENABLED-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[SUM_0_LCSSA_PH_PH]], [[FOR_END_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_UNR_PH]], [[FOR_END_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[FOR_END_UNR_LCSSA_LOOPEXIT]] ] ; ENABLED-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[FOR_END:%.*]] +; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER]], label [[FOR_END:%.*]] ; ENABLED: header.epil.preheader: +; ENABLED-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_UNR_PH]], [[FOR_END_UNR_LCSSA]] ] +; ENABLED-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], 
[[FOR_END_UNR_LCSSA]] ] +; ENABLED-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; ENABLED-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; ENABLED-NEXT: br label [[HEADER_EPIL:%.*]] ; ENABLED: header.epil: -; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[FOR_BODY_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[FOR_BODY_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; ENABLED-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, [[HEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[FOR_BODY_EPIL]] ] ; ENABLED-NEXT: [[CMP_EPIL:%.*]] = icmp eq i64 [[N]], 42 ; ENABLED-NEXT: br i1 [[CMP_EPIL]], label [[FOR_EXIT2_LOOPEXIT2:%.*]], label [[FOR_BODY_EPIL]] @@ -104,12 +103,12 @@ define i32 @test(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[SUM_0_LCSSA_PH1:%.*]] = phi i32 [ [[ADD_EPIL]], [[FOR_BODY_EPIL]] ] ; ENABLED-NEXT: br label [[FOR_END]] ; ENABLED: for.end: -; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], [[FOR_END_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH1]], [[FOR_END_EPILOG_LCSSA]] ] +; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH_PH]], [[FOR_END_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH1]], [[FOR_END_EPILOG_LCSSA]] ] ; ENABLED-NEXT: ret i32 [[SUM_0_LCSSA]] ; ENABLED: for.exit2.loopexit: ; ENABLED-NEXT: [[RETVAL_PH:%.*]] = phi i32 [ [[SUM_02]], [[HEADER]] ], [ [[ADD]], [[FOR_BODY]] ], [ [[ADD_1]], [[FOR_BODY_1]] ], [ [[ADD_2]], [[FOR_BODY_2]] ], [ [[ADD_3]], [[FOR_BODY_3]] ], [ [[ADD_4]], [[FOR_BODY_4]] ], [ [[ADD_5]], [[FOR_BODY_5]] ], [ [[ADD_6]], [[FOR_BODY_6]] ] ; ENABLED-NEXT: br label [[FOR_EXIT2:%.*]] -; ENABLED: for.exit2.loopexit2: +; ENABLED: for.exit2.loopexit3: ; ENABLED-NEXT: [[RETVAL_PH3:%.*]] = phi i32 [ [[SUM_02_EPIL]], [[HEADER_EPIL]] ] ; ENABLED-NEXT: br label [[FOR_EXIT2]] ; ENABLED: for.exit2: diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop-branchweight.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop-branchweight.ll index 6e3bbe1..2617199 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop-branchweight.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop-branchweight.ll @@ -3,7 +3,7 @@ ;; Check that the remainder loop is properly assigned a branch weight for its latch branch. 
; CHECK-LABEL: @test( ; CHECK-LABEL: for.body: -; CHECK: br i1 [[COND1:%.*]], label %for.end.loopexit.unr-lcssa.loopexit, label %for.body, !prof ![[#PROF:]], !llvm.loop ![[#LOOP:]] +; CHECK: br i1 [[COND1:%.*]], label %for.end.loopexit.unr-lcssa, label %for.body, !prof ![[#PROF:]], !llvm.loop ![[#LOOP:]] ; CHECK-LABEL: for.body.epil: ; CHECK: br i1 [[COND2:%.*]], label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !prof ![[#PROF2:]], !llvm.loop ![[#LOOP2:]] ; CHECK: ![[#PROF]] = !{!"branch_weights", i32 1, i32 2499} diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll index 5f6e66e..6835e9b 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop-multiple-exits.ll @@ -15,7 +15,7 @@ define void @test1(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit2.loopexit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -29,7 +29,7 @@ define void @test1(i64 %trip, i1 %cond) { ; EPILOG-NEXT: br i1 false, label %loop_latch, label %exit3.loopexit ; EPILOG: exit3.loopexit: ; EPILOG-NEXT: br label %exit3 -; EPILOG: exit3.loopexit2: +; EPILOG: exit3.loopexit3: ; EPILOG-NEXT: br label %exit3 ; EPILOG: exit3: ; EPILOG-NEXT: ret void @@ -79,30 +79,30 @@ define void @test1(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.loopexit.unr-lcssa.loopexit +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.loopexit.unr-lcssa ; EPILOG: exit1.loopexit: ; EPILOG-NEXT: br label %exit1 -; EPILOG: exit1.loopexit1: +; EPILOG: exit1.loopexit2: ; EPILOG-NEXT: br label %exit1 ; EPILOG: exit1: ; EPILOG-NEXT: ret void -; EPILOG: exit2.loopexit.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit2.loopexit.unr-lcssa ; EPILOG: exit2.loopexit.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit2.loopexit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2.loopexit ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit2.loopexit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %loop_exiting_bb1.epil ; EPILOG: loop_exiting_bb1.epil: -; EPILOG-NEXT: br i1 false, label %loop_exiting_bb2.epil, label %exit1.loopexit1 +; 
EPILOG-NEXT: br i1 false, label %loop_exiting_bb2.epil, label %exit1.loopexit2 ; EPILOG: loop_exiting_bb2.epil: -; EPILOG-NEXT: br i1 false, label %loop_latch.epil, label %exit3.loopexit2 +; EPILOG-NEXT: br i1 false, label %loop_latch.epil, label %exit3.loopexit3 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip @@ -120,7 +120,7 @@ define void @test1(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit2.loopexit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -146,17 +146,17 @@ define void @test1(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.loopexit.unr-lcssa.loopexit, !llvm.loop !0 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.loopexit.unr-lcssa, !llvm.loop !0 ; EPILOG-BLOCK: exit1.loopexit: ; EPILOG-BLOCK-NEXT: br label %exit1 ; EPILOG-BLOCK: exit1: ; EPILOG-BLOCK-NEXT: ret void -; EPILOG-BLOCK: exit2.loopexit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %exit2.loopexit.unr-lcssa ; EPILOG-BLOCK: exit2.loopexit.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2.loopexit ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %loop_latch.epil, label %loop_exiting_bb1.epil @@ -366,7 +366,7 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %for.end.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -448,28 +448,27 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %for.end.unr-lcssa.loopexit, label %header -; EPILOG: for.end.unr-lcssa.loopexit: -; EPILOG-NEXT: %sum.0.lcssa.ph.ph = phi i32 [ %add.7, %for.body.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %for.body.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %for.body.7 ] -; EPILOG-NEXT: br label %for.end.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %for.end.unr-lcssa, label %header ; EPILOG: for.end.unr-lcssa: -; EPILOG-NEXT: %sum.0.lcssa.ph = phi i32 [ poison, %entry ], [ %sum.0.lcssa.ph.ph, %for.end.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %for.end.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, 
%entry ], [ %sum.02.unr.ph, %for.end.unr-lcssa.loopexit ] +; EPILOG-NEXT: %sum.0.lcssa.ph = phi i32 [ %add.7, %for.body.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %for.body.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %for.body.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %for.end ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %for.end.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %for.end.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %for.body.epil ], [ %indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %for.body.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %for.body.epil ] -; EPILOG-NEXT: br i1 false, label %for.exit2.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 false, label %for.exit2.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit2, label %for.body.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit3, label %for.body.epil ; EPILOG: for.body.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4 @@ -488,11 +487,11 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ], [ %add.1, %for.body.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %for.body.2 ], [ 42, %for.exiting_block.3 ], [ %add.3, %for.body.3 ], [ 42, %for.exiting_block.4 ], [ %add.4, %for.body.4 ], [ 42, %for.exiting_block.5 ], [ %add.5, %for.body.5 ], [ 42, %for.exiting_block.6 ], [ %add.6, %for.body.6 ], [ 42, %for.exiting_block.7 ] ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit2: -; EPILOG-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ] +; EPILOG: for.exit2.loopexit3: +; EPILOG-NEXT: %retval.ph4 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ] ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: -; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ] +; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph4, %for.exit2.loopexit3 ] ; EPILOG-NEXT: ret i32 %retval ; ; EPILOG-BLOCK-LABEL: @test2( @@ -501,7 +500,7 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %for.end.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: 
entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -529,19 +528,18 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %for.end.unr-lcssa.loopexit, label %header, !llvm.loop !2 -; EPILOG-BLOCK: for.end.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph.ph = phi i32 [ %add.1, %for.body.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %for.body.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %for.body.1 ] -; EPILOG-BLOCK-NEXT: br label %for.end.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %for.end.unr-lcssa, label %header, !llvm.loop !2 ; EPILOG-BLOCK: for.end.unr-lcssa: -; EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph = phi i32 [ poison, %entry ], [ %sum.0.lcssa.ph.ph, %for.end.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %for.end.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %for.end.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph = phi i32 [ %add.1, %for.body.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %for.body.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %for.body.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %for.end ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %for.end.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %for.end.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 false, label %for.exit2, label %for.exiting_block.epil @@ -549,9 +547,9 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %for.body.epil ; EPILOG-BLOCK: for.body.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: br label %for.end ; EPILOG-BLOCK: for.end: ; EPILOG-BLOCK-NEXT: %sum.0.lcssa = phi i32 [ %sum.0.lcssa.ph, %for.end.unr-lcssa ], [ %add.epil, %for.body.epil ] @@ -560,7 +558,7 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; EPILOG-BLOCK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %for.body ], [ 42, %for.exiting_block.1 ] ; EPILOG-BLOCK-NEXT: br label %for.exit2 ; EPILOG-BLOCK: for.exit2: -; EPILOG-BLOCK-NEXT: %retval = phi i32 [ %sum.02.unr, %header.epil ], [ 42, %for.exiting_block.epil ], [ %retval.ph, %for.exit2.loopexit ] +; EPILOG-BLOCK-NEXT: %retval = phi i32 [ %sum.02.epil.init, %header.epil ], [ 42, %for.exiting_block.epil ], [ %retval.ph, %for.exit2.loopexit ] ; EPILOG-BLOCK-NEXT: ret 
i32 %retval ; ; PROLOG-LABEL: @test2( @@ -796,7 +794,7 @@ define void @test3(i64 %trip, i64 %add, i1 %arg) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit2.loopexit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -812,7 +810,7 @@ define void @test3(i64 %trip, i64 %add, i1 %arg) { ; EPILOG-NEXT: ] ; EPILOG: exit3.loopexit: ; EPILOG-NEXT: br label %exit3 -; EPILOG: exit3.loopexit2: +; EPILOG: exit3.loopexit3: ; EPILOG-NEXT: br label %exit3 ; EPILOG: exit3: ; EPILOG-NEXT: ret void @@ -877,33 +875,33 @@ define void @test3(i64 %trip, i64 %add, i1 %arg) { ; EPILOG-NEXT: %sum.next.7 = add i64 %sum.next.6, %add ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.loopexit.unr-lcssa.loopexit +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.loopexit.unr-lcssa ; EPILOG: exit1.loopexit: ; EPILOG-NEXT: br label %exit1 -; EPILOG: exit1.loopexit1: +; EPILOG: exit1.loopexit2: ; EPILOG-NEXT: br label %exit1 ; EPILOG: exit1: ; EPILOG-NEXT: ret void -; EPILOG: exit2.loopexit.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: %sum.unr.ph = phi i64 [ %sum.next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit2.loopexit.unr-lcssa ; EPILOG: exit2.loopexit.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit2.loopexit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.unr = phi i64 [ 0, %entry ], [ %sum.unr.ph, %exit2.loopexit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] +; EPILOG-NEXT: %sum.unr = phi i64 [ %sum.next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2.loopexit ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit2.loopexit.unr-lcssa ] +; EPILOG-NEXT: %sum.epil.init = phi i64 [ 0, %entry ], [ %sum.unr, %exit2.loopexit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] -; EPILOG-NEXT: %sum.epil = phi i64 [ %sum.unr, %loop_header.epil.preheader ], [ %sum.next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %sum.epil = phi i64 [ %sum.epil.init, %loop_header.epil.preheader ], [ %sum.next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: br i1 %arg, label %loop_latch.epil, label %loop_exiting_bb1.epil ; EPILOG: loop_exiting_bb1.epil: ; EPILOG-NEXT: switch i64 %sum.epil, label %loop_latch.epil [ -; EPILOG-NEXT: i64 24, label %exit1.loopexit1 -; EPILOG-NEXT: i64 42, label %exit3.loopexit2 +; EPILOG-NEXT: i64 24, label %exit1.loopexit2 +; EPILOG-NEXT: i64 42, label %exit3.loopexit3 ; EPILOG-NEXT: ] ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add nuw 
nsw i64 %iv.epil, 1 @@ -923,7 +921,7 @@ define void @test3(i64 %trip, i64 %add, i1 %arg) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit2.loopexit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -954,24 +952,24 @@ define void @test3(i64 %trip, i64 %add, i1 %arg) { ; EPILOG-BLOCK-NEXT: %sum.next.1 = add i64 %sum.next, %add ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.loopexit.unr-lcssa.loopexit, !llvm.loop !3 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.loopexit.unr-lcssa, !llvm.loop !3 ; EPILOG-BLOCK: exit1.loopexit: ; EPILOG-BLOCK-NEXT: br label %exit1 ; EPILOG-BLOCK: exit1: ; EPILOG-BLOCK-NEXT: ret void -; EPILOG-BLOCK: exit2.loopexit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %sum.unr.ph = phi i64 [ %sum.next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: br label %exit2.loopexit.unr-lcssa ; EPILOG-BLOCK: exit2.loopexit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %sum.unr = phi i64 [ 0, %entry ], [ %sum.unr.ph, %exit2.loopexit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %sum.unr = phi i64 [ %sum.next.1, %loop_latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2.loopexit ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %sum.epil.init = phi i64 [ 0, %entry ], [ %sum.unr, %exit2.loopexit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: br i1 %arg, label %loop_latch.epil, label %loop_exiting_bb1.epil ; EPILOG-BLOCK: loop_exiting_bb1.epil: -; EPILOG-BLOCK-NEXT: switch i64 %sum.unr, label %loop_latch.epil [ +; EPILOG-BLOCK-NEXT: switch i64 %sum.epil.init, label %loop_latch.epil [ ; EPILOG-BLOCK-NEXT: i64 24, label %exit1 ; EPILOG-BLOCK-NEXT: i64 42, label %exit3 ; EPILOG-BLOCK-NEXT: ] @@ -1204,7 +1202,7 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -1286,28 +1284,27 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa.loopexit, label %header -; EPILOG: latchExit.unr-lcssa.loopexit: -; EPILOG-NEXT: %result.ph.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %latch.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latchExit.unr-lcssa +; EPILOG-NEXT: br i1 
%niter.ncmp.7, label %latchExit.unr-lcssa, label %header ; EPILOG: latchExit.unr-lcssa: -; EPILOG-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %result.ph = phi i32 [ %add.7, %latch.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %latch.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 %cond, label %latchExit.epilog-lcssa.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 %cond, label %latchExit.epilog-lcssa.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit4, label %latch.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit5, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4 @@ -1316,22 +1313,22 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit2, !llvm.loop !4 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit3, !llvm.loop !4 ; EPILOG: latchExit.epilog-lcssa.loopexit: ; EPILOG-NEXT: %result.ph1.ph = phi i32 [ 0, %header ], [ 0, %latch ], [ 0, %latch.1 ], [ 0, %latch.2 ], [ 0, %latch.3 ], [ 0, %latch.4 ], [ 0, %latch.5 ], [ 0, %latch.6 ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa -; EPILOG: latchExit.epilog-lcssa.loopexit2: -; EPILOG-NEXT: %result.ph1.ph3 = phi i32 [ 0, %header.epil ], [ %add.epil, %latch.epil ] +; EPILOG: latchExit.epilog-lcssa.loopexit3: +; EPILOG-NEXT: %result.ph1.ph4 = phi i32 [ 0, %header.epil ], [ %add.epil, %latch.epil ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa ; EPILOG: latchExit.epilog-lcssa: -; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ 
%result.ph1.ph3, %latchExit.epilog-lcssa.loopexit2 ] +; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph4, %latchExit.epilog-lcssa.loopexit3 ] ; EPILOG-NEXT: br label %latchExit ; EPILOG: latchExit: ; EPILOG-NEXT: %result = phi i32 [ %result.ph, %latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] ; EPILOG-NEXT: ret i32 %result ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit4: +; EPILOG: for.exit2.loopexit5: ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: ; EPILOG-NEXT: ret i32 42 @@ -1342,7 +1339,7 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -1370,19 +1367,18 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa.loopexit, label %header, !llvm.loop !4 -; EPILOG-BLOCK: latchExit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %result.ph.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latchExit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa, label %header, !llvm.loop !4 ; EPILOG-BLOCK: latchExit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ %add.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %latchExit.epilog-lcssa, label %for.exiting_block.epil @@ -1390,9 +1386,9 @@ define i32 @hdr_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %latch.epil ; EPILOG-BLOCK: latch.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; 
EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit: ; EPILOG-BLOCK-NEXT: %result.ph1.ph = phi i32 [ 0, %header ], [ 0, %latch ] @@ -1644,7 +1640,7 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -1726,28 +1722,27 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa.loopexit, label %header -; EPILOG: latchExit.unr-lcssa.loopexit: -; EPILOG-NEXT: %result.ph.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %latch.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latchExit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa, label %header ; EPILOG: latchExit.unr-lcssa: -; EPILOG-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %result.ph = phi i32 [ %add.7, %latch.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %latch.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: 
%cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit4, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4 @@ -1756,22 +1751,22 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit3, !llvm.loop !5 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit4, !llvm.loop !5 ; EPILOG: latchExit.epilog-lcssa.loopexit: ; EPILOG-NEXT: %result.ph1.ph = phi i32 [ 2, %for.exiting_block ], [ 2, %for.exiting_block.1 ], [ 2, %for.exiting_block.2 ], [ 2, %for.exiting_block.3 ], [ 2, %for.exiting_block.4 ], [ 2, %for.exiting_block.5 ], [ 2, %for.exiting_block.6 ], [ 2, %for.exiting_block.7 ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa -; EPILOG: latchExit.epilog-lcssa.loopexit3: -; EPILOG-NEXT: %result.ph1.ph4 = phi i32 [ 2, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] +; EPILOG: latchExit.epilog-lcssa.loopexit4: +; EPILOG-NEXT: %result.ph1.ph5 = phi i32 [ 2, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa ; EPILOG: latchExit.epilog-lcssa: -; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph4, %latchExit.epilog-lcssa.loopexit3 ] +; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph5, %latchExit.epilog-lcssa.loopexit4 ] ; EPILOG-NEXT: br label %latchExit ; EPILOG: latchExit: ; EPILOG-NEXT: %result = phi i32 [ %result.ph, %latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] ; EPILOG-NEXT: ret i32 %result ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit2: +; EPILOG: for.exit2.loopexit3: ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: ; EPILOG-NEXT: ret i32 42 @@ -1782,7 +1777,7 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -1810,19 +1805,18 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa.loopexit, label %header, !llvm.loop !5 -; EPILOG-BLOCK: latchExit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %result.ph.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br 
label %latchExit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa, label %header, !llvm.loop !5 ; EPILOG-BLOCK: latchExit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ %add.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2, label %for.exiting_block.epil @@ -1830,9 +1824,9 @@ define i32 @otherblock_latch_same_exit(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil ; EPILOG-BLOCK: latch.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit: ; EPILOG-BLOCK-NEXT: %result.ph1.ph = phi i32 [ 2, %for.exiting_block ], [ 2, %for.exiting_block.1 ] @@ -2085,7 +2079,7 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -2167,28 +2161,27 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa.loopexit, label %header -; EPILOG: latchExit.unr-lcssa.loopexit: -; EPILOG-NEXT: %result.ph.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %latch.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latchExit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa, label %header ; EPILOG: latchExit.unr-lcssa: -; EPILOG-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, 
%latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %result.ph = phi i32 [ %add.7, %latch.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %latch.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit4, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4 @@ -2197,22 +2190,22 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit3, !llvm.loop !6 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit4, !llvm.loop !6 ; EPILOG: latchExit.epilog-lcssa.loopexit: ; EPILOG-NEXT: %result.ph1.ph = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %for.exiting_block.1 ], [ %add.1, %for.exiting_block.2 ], [ %add.2, %for.exiting_block.3 ], [ %add.3, %for.exiting_block.4 ], [ %add.4, %for.exiting_block.5 ], [ %add.5, %for.exiting_block.6 ], [ %add.6, %for.exiting_block.7 ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa -; EPILOG: latchExit.epilog-lcssa.loopexit3: -; EPILOG-NEXT: %result.ph1.ph4 = phi i32 [ %sum.02.epil, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] +; EPILOG: latchExit.epilog-lcssa.loopexit4: +; EPILOG-NEXT: %result.ph1.ph5 = phi i32 [ %sum.02.epil, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa ; EPILOG: latchExit.epilog-lcssa: -; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, 
%latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph4, %latchExit.epilog-lcssa.loopexit3 ] +; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph5, %latchExit.epilog-lcssa.loopexit4 ] ; EPILOG-NEXT: br label %latchExit ; EPILOG: latchExit: ; EPILOG-NEXT: %result = phi i32 [ %result.ph, %latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] ; EPILOG-NEXT: ret i32 %result ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit2: +; EPILOG: for.exit2.loopexit3: ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: ; EPILOG-NEXT: ret i32 42 @@ -2223,7 +2216,7 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -2251,19 +2244,18 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa.loopexit, label %header, !llvm.loop !6 -; EPILOG-BLOCK: latchExit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %result.ph.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latchExit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa, label %header, !llvm.loop !6 ; EPILOG-BLOCK: latchExit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ %add.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2, label %for.exiting_block.epil @@ -2271,15 +2263,15 @@ define i32 @otherblock_latch_same_exit2(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil ; EPILOG-BLOCK: latch.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = 
getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa.loopexit: ; EPILOG-BLOCK-NEXT: %result.ph1.ph = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %for.exiting_block.1 ] ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph1 = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %for.exiting_block.epil ], [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph1 = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %for.exiting_block.epil ], [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ] ; EPILOG-BLOCK-NEXT: br label %latchExit ; EPILOG-BLOCK: latchExit: ; EPILOG-BLOCK-NEXT: %result = phi i32 [ %result.ph, %latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] @@ -2527,7 +2519,7 @@ define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -2609,52 +2601,51 @@ define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa.loopexit, label %header -; EPILOG: latchExit.unr-lcssa.loopexit: -; EPILOG-NEXT: %result.ph.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %latch.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latchExit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latchExit.unr-lcssa, label %header ; EPILOG: latchExit.unr-lcssa: -; EPILOG-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %result.ph = phi i32 [ %add.7, %latch.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %latch.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ 
%indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 %cond, label %for.exit2.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %11 = load i32, ptr %arrayidx.epil, align 4 ; EPILOG-NEXT: %add.epil = add nsw i32 %11, %sum.02.epil ; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit3, label %latch.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa.loopexit4, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %indvars.iv.next.epil = add i64 %indvars.iv.epil, 1 ; EPILOG-NEXT: %exitcond.epil = icmp eq i64 %indvars.iv.next.epil, %n ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit3, !llvm.loop !7 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit4, !llvm.loop !7 ; EPILOG: latchExit.epilog-lcssa.loopexit: ; EPILOG-NEXT: %result.ph1.ph = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %for.exiting_block.1 ], [ %add.1, %for.exiting_block.2 ], [ %add.2, %for.exiting_block.3 ], [ %add.3, %for.exiting_block.4 ], [ %add.4, %for.exiting_block.5 ], [ %add.5, %for.exiting_block.6 ], [ %add.6, %for.exiting_block.7 ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa -; EPILOG: latchExit.epilog-lcssa.loopexit3: -; EPILOG-NEXT: %result.ph1.ph4 = phi i32 [ %sum.02.epil, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] +; EPILOG: latchExit.epilog-lcssa.loopexit4: +; EPILOG-NEXT: %result.ph1.ph5 = phi i32 [ %sum.02.epil, %for.exiting_block.epil ], [ %add.epil, %latch.epil ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa ; EPILOG: latchExit.epilog-lcssa: -; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph4, %latchExit.epilog-lcssa.loopexit3 ] +; EPILOG-NEXT: %result.ph1 = phi i32 [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %result.ph1.ph5, %latchExit.epilog-lcssa.loopexit4 ] ; EPILOG-NEXT: br label %latchExit ; EPILOG: latchExit: ; EPILOG-NEXT: %result = phi i32 [ %result.ph, %latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] ; EPILOG-NEXT: ret i32 %result ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit2: +; EPILOG: for.exit2.loopexit3: ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: ; EPILOG-NEXT: ret i32 42 @@ -2665,7 +2656,7 @@ define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: 
entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -2693,26 +2684,25 @@ define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa.loopexit, label %header, !llvm.loop !7 -; EPILOG-BLOCK: latchExit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %result.ph.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latchExit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latchExit.unr-lcssa, label %header, !llvm.loop !7 ; EPILOG-BLOCK: latchExit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ poison, %entry ], [ %result.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph = phi i32 [ %add.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %for.exit2, label %for.exiting_block.epil ; EPILOG-BLOCK: for.exiting_block.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %5 = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %5, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %latchExit.epilog-lcssa, label %latch.epil ; EPILOG-BLOCK: latch.epil: @@ -2721,7 +2711,7 @@ define i32 @otherblock_latch_same_exit3(ptr nocapture %a, i64 %n, i1 %cond) { ; EPILOG-BLOCK-NEXT: %result.ph1.ph = phi i32 [ %sum.02, %for.exiting_block ], [ %add, %for.exiting_block.1 ] ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa: -; EPILOG-BLOCK-NEXT: %result.ph1 = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %for.exiting_block.epil ], [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %result.ph1 = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %for.exiting_block.epil ], [ %result.ph1.ph, %latchExit.epilog-lcssa.loopexit ] ; EPILOG-BLOCK-NEXT: br label %latchExit ; EPILOG-BLOCK: latchExit: ; EPILOG-BLOCK-NEXT: %result = phi i32 [ %result.ph, 
%latchExit.unr-lcssa ], [ %result.ph1, %latchExit.epilog-lcssa ] @@ -3013,7 +3003,7 @@ define void @unique_exit(i32 %N, i32 %M) { ; EPILOG-NEXT: %1 = add i32 %0, -1 ; EPILOG-NEXT: %xtraiter = and i32 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i32 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %preheader.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %preheader.new ; EPILOG: preheader.new: ; EPILOG-NEXT: %unroll_iter = sub i32 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -3054,37 +3044,36 @@ define void @unique_exit(i32 %N, i32 %M) { ; EPILOG: latch.7: ; EPILOG-NEXT: %niter.next.7 = add nuw i32 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i32 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label %latchExit.unr-lcssa.loopexit -; EPILOG: latchExit.unr-lcssa.loopexit: -; EPILOG-NEXT: %i2.ph.ph.ph = phi i32 [ -1, %latch.7 ] -; EPILOG-NEXT: %i4.unr.ph = phi i32 [ %inc.7, %latch.7 ] -; EPILOG-NEXT: br label %latchExit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label %latchExit.unr-lcssa ; EPILOG: latchExit.unr-lcssa: -; EPILOG-NEXT: %i2.ph.ph = phi i32 [ poison, %preheader ], [ %i2.ph.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %i4.unr = phi i32 [ 0, %preheader ], [ %i4.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %i2.ph.ph = phi i32 [ -1, %latch.7 ] +; EPILOG-NEXT: %i4.unr = phi i32 [ %inc.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i32 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %i4.epil.init = phi i32 [ 0, %preheader ], [ %i4.unr, %latchExit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i32 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %i4.epil = phi i32 [ %inc.epil, %latch.epil ], [ %i4.unr, %header.epil.preheader ] +; EPILOG-NEXT: %i4.epil = phi i32 [ %inc.epil, %latch.epil ], [ %i4.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i32 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] ; EPILOG-NEXT: %inc.epil = add nuw i32 %i4.epil, 1 ; EPILOG-NEXT: %cmp1.epil = icmp ult i32 %inc.epil, %N -; EPILOG-NEXT: br i1 %cmp1.epil, label %latch.epil, label %latchExit.epilog-lcssa.loopexit2 +; EPILOG-NEXT: br i1 %cmp1.epil, label %latch.epil, label %latchExit.epilog-lcssa.loopexit3 ; EPILOG: latch.epil: ; EPILOG-NEXT: %cmp.epil = icmp ult i32 %inc.epil, %M.shifted ; EPILOG-NEXT: %epil.iter.next = add i32 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i32 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit2, !llvm.loop !8 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %header.epil, label %latchExit.epilog-lcssa.loopexit3, !llvm.loop !8 ; EPILOG: latchExit.epilog-lcssa.loopexit: ; EPILOG-NEXT: %i2.ph.ph1.ph = phi i32 [ %i4, %header ], [ %inc, %latch ], [ %inc.1, %latch.1 ], [ %inc.2, %latch.2 ], [ %inc.3, %latch.3 ], [ %inc.4, %latch.4 ], [ %inc.5, %latch.5 ], [ %inc.6, %latch.6 ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa -; EPILOG: latchExit.epilog-lcssa.loopexit2: -; EPILOG-NEXT: %i2.ph.ph1.ph3 = phi i32 [ %i4.epil, %header.epil ], [ -1, %latch.epil ] +; EPILOG: latchExit.epilog-lcssa.loopexit3: +; EPILOG-NEXT: %i2.ph.ph1.ph4 = phi i32 [ %i4.epil, %header.epil ], [ -1, %latch.epil ] ; EPILOG-NEXT: br label %latchExit.epilog-lcssa ; EPILOG: latchExit.epilog-lcssa: -; EPILOG-NEXT: 
%i2.ph.ph1 = phi i32 [ %i2.ph.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %i2.ph.ph1.ph3, %latchExit.epilog-lcssa.loopexit2 ] +; EPILOG-NEXT: %i2.ph.ph1 = phi i32 [ %i2.ph.ph1.ph, %latchExit.epilog-lcssa.loopexit ], [ %i2.ph.ph1.ph4, %latchExit.epilog-lcssa.loopexit3 ] ; EPILOG-NEXT: br label %latchExit ; EPILOG: latchExit: ; EPILOG-NEXT: %i2.ph = phi i32 [ %i2.ph.ph, %latchExit.unr-lcssa ], [ %i2.ph.ph1, %latchExit.epilog-lcssa ] @@ -3098,7 +3087,7 @@ define void @unique_exit(i32 %N, i32 %M) { ; EPILOG-BLOCK-NEXT: %1 = add i32 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i32 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i32 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchExit.unr-lcssa, label %preheader.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %preheader.new ; EPILOG-BLOCK: preheader.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i32 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -3115,20 +3104,19 @@ define void @unique_exit(i32 %N, i32 %M) { ; EPILOG-BLOCK: latch.1: ; EPILOG-BLOCK-NEXT: %niter.next.1 = add nuw i32 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i32 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %latchExit.unr-lcssa.loopexit, !llvm.loop !8 -; EPILOG-BLOCK: latchExit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %i2.ph.ph.ph = phi i32 [ -1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %i4.unr.ph = phi i32 [ %inc.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latchExit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %latchExit.unr-lcssa, !llvm.loop !8 ; EPILOG-BLOCK: latchExit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %i2.ph.ph = phi i32 [ poison, %preheader ], [ %i2.ph.ph.ph, %latchExit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %i4.unr = phi i32 [ 0, %preheader ], [ %i4.unr.ph, %latchExit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %i2.ph.ph = phi i32 [ -1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %i4.unr = phi i32 [ %inc.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i32 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchExit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %i4.epil.init = phi i32 [ 0, %preheader ], [ %i4.unr, %latchExit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i32 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: -; EPILOG-BLOCK-NEXT: %inc.epil = add nuw i32 %i4.unr, 1 +; EPILOG-BLOCK-NEXT: %inc.epil = add nuw i32 %i4.epil.init, 1 ; EPILOG-BLOCK-NEXT: %cmp1.epil = icmp ult i32 %inc.epil, %N ; EPILOG-BLOCK-NEXT: br i1 %cmp1.epil, label %latch.epil, label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latch.epil: @@ -3137,7 +3125,7 @@ define void @unique_exit(i32 %N, i32 %M) { ; EPILOG-BLOCK-NEXT: %i2.ph.ph1.ph = phi i32 [ %i4, %header ], [ %inc, %latch ] ; EPILOG-BLOCK-NEXT: br label %latchExit.epilog-lcssa ; EPILOG-BLOCK: latchExit.epilog-lcssa: -; EPILOG-BLOCK-NEXT: %i2.ph.ph1 = phi i32 [ -1, %latch.epil ], [ %i4.unr, %header.epil ], [ %i2.ph.ph1.ph, %latchExit.epilog-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %i2.ph.ph1 = phi i32 [ -1, %latch.epil ], [ %i4.epil.init, %header.epil ], [ %i2.ph.ph1.ph, %latchExit.epilog-lcssa.loopexit ] ; EPILOG-BLOCK-NEXT: br label %latchExit ; EPILOG-BLOCK: latchExit: ; EPILOG-BLOCK-NEXT: %i2.ph = phi i32 [ %i2.ph.ph, %latchExit.unr-lcssa ], [ %i2.ph.ph1, %latchExit.epilog-lcssa ] @@ -3300,7 +3288,7 @@ define i64 @test5(i64 %trip, i64 %add, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, 
-1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latchexit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -3390,39 +3378,38 @@ define i64 @test5(i64 %trip, i64 %add, i1 %cond) { ; EPILOG-NEXT: %sum.next.7 = add i64 %sum.next.6, %add ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %latchexit.unr-lcssa.loopexit +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %latchexit.unr-lcssa ; EPILOG: exit1.loopexit: ; EPILOG-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.2, %loop_exiting.2 ], [ %ivy.2, %loop_exiting.2 ], [ %ivy.3, %loop_exiting.3 ], [ %ivy.3, %loop_exiting.3 ], [ %ivy.4, %loop_exiting.4 ], [ %ivy.4, %loop_exiting.4 ], [ %ivy.5, %loop_exiting.5 ], [ %ivy.5, %loop_exiting.5 ], [ %ivy.6, %loop_exiting.6 ], [ %ivy.6, %loop_exiting.6 ], [ %ivy.7, %loop_exiting.7 ], [ %ivy.7, %loop_exiting.7 ] ; EPILOG-NEXT: br label %exit1 -; EPILOG: exit1.loopexit2: -; EPILOG-NEXT: %result.ph3 = phi i64 [ %ivy.epil, %loop_exiting.epil ], [ %ivy.epil, %loop_exiting.epil ] +; EPILOG: exit1.loopexit3: +; EPILOG-NEXT: %result.ph4 = phi i64 [ %ivy.epil, %loop_exiting.epil ], [ %ivy.epil, %loop_exiting.epil ] ; EPILOG-NEXT: br label %exit1 ; EPILOG: exit1: -; EPILOG-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %result.ph3, %exit1.loopexit2 ] +; EPILOG-NEXT: %result = phi i64 [ %result.ph, %exit1.loopexit ], [ %result.ph4, %exit1.loopexit3 ] ; EPILOG-NEXT: ret i64 %result -; EPILOG: latchexit.unr-lcssa.loopexit: -; EPILOG-NEXT: %sum.next.lcssa.ph.ph = phi i64 [ %sum.next.7, %loop_latch.7 ] -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: %sum.unr.ph = phi i64 [ %sum.next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %latchexit.unr-lcssa ; EPILOG: latchexit.unr-lcssa: -; EPILOG-NEXT: %sum.next.lcssa.ph = phi i64 [ poison, %entry ], [ %sum.next.lcssa.ph.ph, %latchexit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %latchexit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.unr = phi i64 [ 0, %entry ], [ %sum.unr.ph, %latchexit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %sum.next.lcssa.ph = phi i64 [ %sum.next.7, %loop_latch.7 ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] +; EPILOG-NEXT: %sum.unr = phi i64 [ %sum.next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %latchexit ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %latchexit.unr-lcssa ] +; EPILOG-NEXT: %sum.epil.init = phi i64 [ 0, %entry ], [ %sum.unr, %latchexit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] -; EPILOG-NEXT: %sum.epil = phi i64 [ %sum.unr, %loop_header.epil.preheader ], [ %sum.next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, 
%loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %sum.epil = phi i64 [ %sum.epil.init, %loop_header.epil.preheader ], [ %sum.next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %loop_exiting.epil ; EPILOG: loop_exiting.epil: ; EPILOG-NEXT: %ivy.epil = add i64 %iv.epil, %add ; EPILOG-NEXT: switch i64 %sum.epil, label %loop_latch.epil [ -; EPILOG-NEXT: i64 24, label %exit1.loopexit2 -; EPILOG-NEXT: i64 42, label %exit1.loopexit2 +; EPILOG-NEXT: i64 24, label %exit1.loopexit3 +; EPILOG-NEXT: i64 42, label %exit1.loopexit3 ; EPILOG-NEXT: ] ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add nuw nsw i64 %iv.epil, 1 @@ -3444,7 +3431,7 @@ define i64 @test5(i64 %trip, i64 %add, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latchexit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -3474,36 +3461,35 @@ define i64 @test5(i64 %trip, i64 %add, i1 %cond) { ; EPILOG-BLOCK-NEXT: %sum.next.1 = add i64 %sum.next, %add ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %latchexit.unr-lcssa.loopexit, !llvm.loop !9 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %latchexit.unr-lcssa, !llvm.loop !9 ; EPILOG-BLOCK: exit1.loopexit: ; EPILOG-BLOCK-NEXT: %result.ph = phi i64 [ %ivy, %loop_exiting ], [ %ivy, %loop_exiting ], [ %ivy.1, %loop_exiting.1 ], [ %ivy.1, %loop_exiting.1 ] ; EPILOG-BLOCK-NEXT: br label %exit1 ; EPILOG-BLOCK: exit1: ; EPILOG-BLOCK-NEXT: %result = phi i64 [ %ivy.epil, %loop_exiting.epil ], [ %ivy.epil, %loop_exiting.epil ], [ %result.ph, %exit1.loopexit ] ; EPILOG-BLOCK-NEXT: ret i64 %result -; EPILOG-BLOCK: latchexit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %sum.next.lcssa.ph.ph = phi i64 [ %sum.next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: %iv.unr.ph = phi i64 [ %iv_next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.unr.ph = phi i64 [ %sum.next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latchexit.unr-lcssa ; EPILOG-BLOCK: latchexit.unr-lcssa: -; EPILOG-BLOCK-NEXT: %sum.next.lcssa.ph = phi i64 [ poison, %entry ], [ %sum.next.lcssa.ph.ph, %latchexit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %latchexit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.unr = phi i64 [ 0, %entry ], [ %sum.unr.ph, %latchexit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %sum.next.lcssa.ph = phi i64 [ %sum.next.1, %loop_latch.1 ] +; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ %iv_next.1, %loop_latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.unr = phi i64 [ %sum.next.1, %loop_latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %latchexit ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %latchexit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.epil.init = phi i64 [ 0, %entry ], [ %sum.unr, %latchexit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = 
icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %loop_latch.epil, label %loop_exiting.epil ; EPILOG-BLOCK: loop_exiting.epil: -; EPILOG-BLOCK-NEXT: %ivy.epil = add i64 %iv.unr, %add -; EPILOG-BLOCK-NEXT: switch i64 %sum.unr, label %loop_latch.epil [ +; EPILOG-BLOCK-NEXT: %ivy.epil = add i64 %iv.epil.init, %add +; EPILOG-BLOCK-NEXT: switch i64 %sum.epil.init, label %loop_latch.epil [ ; EPILOG-BLOCK-NEXT: i64 24, label %exit1 ; EPILOG-BLOCK-NEXT: i64 42, label %exit1 ; EPILOG-BLOCK-NEXT: ] ; EPILOG-BLOCK: loop_latch.epil: -; EPILOG-BLOCK-NEXT: %sum.next.epil = add i64 %sum.unr, %add +; EPILOG-BLOCK-NEXT: %sum.next.epil = add i64 %sum.epil.init, %add ; EPILOG-BLOCK-NEXT: br label %latchexit ; EPILOG-BLOCK: latchexit: ; EPILOG-BLOCK-NEXT: %sum.next.lcssa = phi i64 [ %sum.next.lcssa.ph, %latchexit.unr-lcssa ], [ %sum.next.epil, %loop_latch.epil ] @@ -3752,7 +3738,7 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %latch_exit.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %header @@ -3834,28 +3820,27 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-NEXT: %indvars.iv.next.7 = add i64 %indvars.iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp eq i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latch_exit.unr-lcssa.loopexit, label %header -; EPILOG: latch_exit.unr-lcssa.loopexit: -; EPILOG-NEXT: %sum.0.lcssa.ph.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.7, %latch.7 ] -; EPILOG-NEXT: %sum.02.unr.ph = phi i32 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latch_exit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %latch_exit.unr-lcssa, label %header ; EPILOG: latch_exit.unr-lcssa: -; EPILOG-NEXT: %sum.0.lcssa.ph = phi i32 [ poison, %entry ], [ %sum.0.lcssa.ph.ph, %latch_exit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latch_exit.unr-lcssa.loopexit ] -; EPILOG-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latch_exit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %sum.0.lcssa.ph = phi i32 [ %add.7, %latch.7 ] +; EPILOG-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.7, %latch.7 ] +; EPILOG-NEXT: %sum.02.unr = phi i32 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latch_exit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latch_exit.unr-lcssa ] +; EPILOG-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latch_exit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.unr, %header.epil.preheader ] -; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.unr, %header.epil.preheader ] +; EPILOG-NEXT: 
%indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %latch.epil ], [ %indvars.iv.epil.init, %header.epil.preheader ] +; EPILOG-NEXT: %sum.02.epil = phi i32 [ %add.epil, %latch.epil ], [ %sum.02.epil.init, %header.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 false, label %for.exit2.loopexit2, label %for.exiting_block.epil +; EPILOG-NEXT: br i1 false, label %for.exit2.loopexit3, label %for.exiting_block.epil ; EPILOG: for.exiting_block.epil: ; EPILOG-NEXT: %cmp.epil = icmp eq i64 %n, 42 -; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit2, label %latch.epil +; EPILOG-NEXT: br i1 %cmp.epil, label %for.exit2.loopexit3, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil ; EPILOG-NEXT: %load.epil = load i32, ptr %arrayidx.epil, align 4 @@ -3874,11 +3859,11 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG: for.exit2.loopexit: ; EPILOG-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ], [ %add.1, %latch.1 ], [ 42, %for.exiting_block.2 ], [ %add.2, %latch.2 ], [ 42, %for.exiting_block.3 ], [ %add.3, %latch.3 ], [ 42, %for.exiting_block.4 ], [ %add.4, %latch.4 ], [ 42, %for.exiting_block.5 ], [ %add.5, %latch.5 ], [ 42, %for.exiting_block.6 ], [ %add.6, %latch.6 ], [ 42, %for.exiting_block.7 ] ; EPILOG-NEXT: br label %for.exit2 -; EPILOG: for.exit2.loopexit2: -; EPILOG-NEXT: %retval.ph3 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ] +; EPILOG: for.exit2.loopexit3: +; EPILOG-NEXT: %retval.ph4 = phi i32 [ 42, %for.exiting_block.epil ], [ %sum.02.epil, %header.epil ] ; EPILOG-NEXT: br label %for.exit2 ; EPILOG: for.exit2: -; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph3, %for.exit2.loopexit2 ] +; EPILOG-NEXT: %retval = phi i32 [ %retval.ph, %for.exit2.loopexit ], [ %retval.ph4, %for.exit2.loopexit3 ] ; EPILOG-NEXT: %addx = add i32 %retval, %x ; EPILOG-NEXT: br i1 %cond, label %exit_true, label %exit_false ; EPILOG: exit_true: @@ -3892,7 +3877,7 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %latch_exit.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -3920,19 +3905,18 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-BLOCK-NEXT: %indvars.iv.next.1 = add i64 %indvars.iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latch_exit.unr-lcssa.loopexit, label %header, !llvm.loop !10 -; EPILOG-BLOCK: latch_exit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr.ph = phi i64 [ %indvars.iv.next.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: %sum.02.unr.ph = phi i32 [ %add.1, %latch.1 ] -; EPILOG-BLOCK-NEXT: br label %latch_exit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %latch_exit.unr-lcssa, label %header, !llvm.loop !10 ; EPILOG-BLOCK: latch_exit.unr-lcssa: -; 
EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph = phi i32 [ poison, %entry ], [ %sum.0.lcssa.ph.ph, %latch_exit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ 0, %entry ], [ %indvars.iv.unr.ph, %latch_exit.unr-lcssa.loopexit ] -; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ 0, %entry ], [ %sum.02.unr.ph, %latch_exit.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %sum.0.lcssa.ph = phi i32 [ %add.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %indvars.iv.unr = phi i64 [ %indvars.iv.next.1, %latch.1 ] +; EPILOG-BLOCK-NEXT: %sum.02.unr = phi i32 [ %add.1, %latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latch_exit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %indvars.iv.epil.init = phi i64 [ 0, %entry ], [ %indvars.iv.unr, %latch_exit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %sum.02.epil.init = phi i32 [ 0, %entry ], [ %sum.02.unr, %latch_exit.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod2 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod2) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 false, label %for.exit2, label %for.exiting_block.epil @@ -3940,9 +3924,9 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-BLOCK-NEXT: %cmp.epil = icmp eq i64 %n, 42 ; EPILOG-BLOCK-NEXT: br i1 %cmp.epil, label %for.exit2, label %latch.epil ; EPILOG-BLOCK: latch.epil: -; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr +; EPILOG-BLOCK-NEXT: %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.epil.init ; EPILOG-BLOCK-NEXT: %load.epil = load i32, ptr %arrayidx.epil, align 4 -; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %load.epil, %sum.02.unr +; EPILOG-BLOCK-NEXT: %add.epil = add nsw i32 %load.epil, %sum.02.epil.init ; EPILOG-BLOCK-NEXT: br label %latch_exit ; EPILOG-BLOCK: latch_exit: ; EPILOG-BLOCK-NEXT: %sum.0.lcssa = phi i32 [ %sum.0.lcssa.ph, %latch_exit.unr-lcssa ], [ %add.epil, %latch.epil ] @@ -3951,7 +3935,7 @@ define i32 @test6(ptr nocapture %a, i64 %n, i1 %cond, i32 %x) { ; EPILOG-BLOCK-NEXT: %retval.ph = phi i32 [ 42, %for.exiting_block ], [ %sum.02, %header ], [ %add, %latch ], [ 42, %for.exiting_block.1 ] ; EPILOG-BLOCK-NEXT: br label %for.exit2 ; EPILOG-BLOCK: for.exit2: -; EPILOG-BLOCK-NEXT: %retval = phi i32 [ %sum.02.unr, %header.epil ], [ 42, %for.exiting_block.epil ], [ %retval.ph, %for.exit2.loopexit ] +; EPILOG-BLOCK-NEXT: %retval = phi i32 [ %sum.02.epil.init, %header.epil ], [ 42, %for.exiting_block.epil ], [ %retval.ph, %for.exit2.loopexit ] ; EPILOG-BLOCK-NEXT: %addx = add i32 %retval, %x ; EPILOG-BLOCK-NEXT: br i1 %cond, label %exit_true, label %exit_false ; EPILOG-BLOCK: exit_true: @@ -4213,7 +4197,7 @@ define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) { ; EPILOG-NEXT: %2 = add i64 %1, -1 ; EPILOG-NEXT: %xtraiter = and i64 %1, 7 ; EPILOG-NEXT: %3 = icmp ult i64 %2, 7 -; EPILOG-NEXT: br i1 %3, label %latchexit.unr-lcssa, label %preheader.new +; EPILOG-NEXT: br i1 %3, label %header.epil.preheader, label %preheader.new ; EPILOG: preheader.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %1, %xtraiter ; EPILOG-NEXT: br label %header @@ -4239,20 +4223,20 @@ define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) { ; EPILOG-NEXT: %add.7 = add nuw nsw i64 %i6, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label 
%latchexit.unr-lcssa.loopexit -; EPILOG: latchexit.unr-lcssa.loopexit: -; EPILOG-NEXT: %i6.unr.ph = phi i64 [ %add.7, %latch.7 ] -; EPILOG-NEXT: br label %latchexit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label %latchexit.unr-lcssa ; EPILOG: latchexit.unr-lcssa: -; EPILOG-NEXT: %i6.unr = phi i64 [ 1, %preheader ], [ %i6.unr.ph, %latchexit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %i6.unr = phi i64 [ %add.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %latchexit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %i6.epil.init = phi i64 [ 1, %preheader ], [ %i6.unr, %latchexit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %i6.epil = phi i64 [ %i6.unr, %header.epil.preheader ], [ %add.epil, %latch.epil ] +; EPILOG-NEXT: %i6.epil = phi i64 [ %i6.epil.init, %header.epil.preheader ], [ %add.epil, %latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] -; EPILOG-NEXT: br i1 false, label %loopexit1.loopexit1, label %latch.epil +; EPILOG-NEXT: br i1 false, label %loopexit1.loopexit2, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %add.epil = add nuw nsw i64 %i6.epil, 1 ; EPILOG-NEXT: %i9.epil = icmp slt i64 %add.epil, %sext @@ -4268,11 +4252,11 @@ define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) { ; EPILOG: loopexit1.loopexit: ; EPILOG-NEXT: %sext3.ph = phi i32 [ %shft, %header ], [ %shft, %latch ], [ %shft, %latch.1 ], [ %shft, %latch.2 ], [ %shft, %latch.3 ], [ %shft, %latch.4 ], [ %shft, %latch.5 ], [ %shft, %latch.6 ] ; EPILOG-NEXT: br label %loopexit1 -; EPILOG: loopexit1.loopexit1: -; EPILOG-NEXT: %sext3.ph2 = phi i32 [ %shft, %header.epil ] +; EPILOG: loopexit1.loopexit2: +; EPILOG-NEXT: %sext3.ph3 = phi i32 [ %shft, %header.epil ] ; EPILOG-NEXT: br label %loopexit1 ; EPILOG: loopexit1: -; EPILOG-NEXT: %sext3 = phi i32 [ %sext3.ph, %loopexit1.loopexit ], [ %sext3.ph2, %loopexit1.loopexit1 ] +; EPILOG-NEXT: %sext3 = phi i32 [ %sext3.ph, %loopexit1.loopexit ], [ %sext3.ph3, %loopexit1.loopexit2 ] ; EPILOG-NEXT: ret i32 %sext3 ; ; EPILOG-BLOCK-LABEL: @test7( @@ -4287,7 +4271,7 @@ define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) { ; EPILOG-BLOCK-NEXT: %2 = add i64 %1, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %1, 1 ; EPILOG-BLOCK-NEXT: %3 = icmp ult i64 %2, 1 -; EPILOG-BLOCK-NEXT: br i1 %3, label %latchexit.unr-lcssa, label %preheader.new +; EPILOG-BLOCK-NEXT: br i1 %3, label %header.epil.preheader, label %preheader.new ; EPILOG-BLOCK: preheader.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %1, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -4301,13 +4285,13 @@ define i32 @test7(i32 %arg, i32 %arg1, i32 %arg2) { ; EPILOG-BLOCK-NEXT: %add.1 = add nuw nsw i64 %i6, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %latchexit.unr-lcssa.loopexit, !llvm.loop !11 -; EPILOG-BLOCK: latchexit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %latchexit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %latchexit.unr-lcssa, !llvm.loop !11 ; EPILOG-BLOCK: latchexit.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label 
%latchexit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: ; EPILOG-BLOCK-NEXT: br i1 false, label %loopexit1, label %latch.epil @@ -4480,7 +4464,7 @@ define void @test8() { ; EPILOG-NEXT: br label %outerloop ; EPILOG: outerloop.loopexit.loopexit: ; EPILOG-NEXT: br label %outerloop.loopexit -; EPILOG: outerloop.loopexit.loopexit1: +; EPILOG: outerloop.loopexit.loopexit2: ; EPILOG-NEXT: br label %outerloop.loopexit ; EPILOG: outerloop.loopexit: ; EPILOG-NEXT: br label %outerloop @@ -4490,7 +4474,7 @@ define void @test8() { ; EPILOG-NEXT: %1 = sub i64 99, %i ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit.unr-lcssa, label %outerloop.new +; EPILOG-NEXT: br i1 %2, label %innerH.epil.preheader, label %outerloop.new ; EPILOG: outerloop.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %innerH @@ -4516,21 +4500,21 @@ define void @test8() { ; EPILOG: latch.7: ; EPILOG-NEXT: %niter.next.7 = add nuw nsw i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %innerH, label %exit.unr-lcssa.loopexit -; EPILOG: exit.unr-lcssa.loopexit: -; EPILOG-NEXT: %i3.unr.ph = phi i64 [ %i4.7, %latch.7 ] -; EPILOG-NEXT: br label %exit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %innerH, label %exit.unr-lcssa ; EPILOG: exit.unr-lcssa: -; EPILOG-NEXT: %i3.unr = phi i64 [ %i, %outerloop ], [ %i3.unr.ph, %exit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %i3.unr = phi i64 [ %i4.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %innerH.epil.preheader, label %exit.loopexit ; EPILOG: innerH.epil.preheader: +; EPILOG-NEXT: %i3.epil.init = phi i64 [ %i, %outerloop ], [ %i3.unr, %exit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %innerH.epil ; EPILOG: innerH.epil: -; EPILOG-NEXT: %i3.epil = phi i64 [ %i4.epil, %latch.epil ], [ %i3.unr, %innerH.epil.preheader ] +; EPILOG-NEXT: %i3.epil = phi i64 [ %i4.epil, %latch.epil ], [ %i3.epil.init, %innerH.epil.preheader ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %innerH.epil.preheader ], [ %epil.iter.next, %latch.epil ] ; EPILOG-NEXT: %i4.epil = add nuw nsw i64 %i3.epil, 1 -; EPILOG-NEXT: br i1 false, label %outerloop.loopexit.loopexit1, label %latch.epil +; EPILOG-NEXT: br i1 false, label %outerloop.loopexit.loopexit2, label %latch.epil ; EPILOG: latch.epil: ; EPILOG-NEXT: %i6.epil = icmp ult i64 %i4.epil, 100 ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 @@ -4549,27 +4533,26 @@ define void @test8() { ; EPILOG-BLOCK: outerloop.loopexit.loopexit: ; EPILOG-BLOCK-NEXT: br label %outerloop.loopexit ; EPILOG-BLOCK: outerloop.loopexit: -; EPILOG-BLOCK-NEXT: br i1 false, label %exit.unr-lcssa.1, label %outerloop.new.1 +; EPILOG-BLOCK-NEXT: br i1 false, label %innerH.epil.preheader.1, label %outerloop.new.1 ; EPILOG-BLOCK: outerloop.new.1: ; EPILOG-BLOCK-NEXT: br label %innerH.1 ; EPILOG-BLOCK: innerH.1: ; EPILOG-BLOCK-NEXT: %i3.1 = phi i64 [ 0, %outerloop.new.1 ], [ %i4.1.1, %latch.1.1 ] ; EPILOG-BLOCK-NEXT: %niter.1 = phi i64 [ 0, %outerloop.new.1 ], [ %niter.next.1.1, %latch.1.1 ] -; EPILOG-BLOCK-NEXT: br i1 false, label %outerloop.loopexit.loopexit.1, label %latch.12 -; 
EPILOG-BLOCK: latch.12: +; EPILOG-BLOCK-NEXT: br i1 false, label %outerloop.loopexit.loopexit.1, label %latch.13 +; EPILOG-BLOCK: latch.13: ; EPILOG-BLOCK-NEXT: %i4.1.1 = add nuw nsw i64 %i3.1, 2 ; EPILOG-BLOCK-NEXT: br i1 false, label %outerloop.loopexit.loopexit.1, label %latch.1.1 ; EPILOG-BLOCK: latch.1.1: ; EPILOG-BLOCK-NEXT: %niter.next.1.1 = add i64 %niter.1, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1.1 = icmp ne i64 %niter.next.1.1, 100 -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1.1, label %innerH.1, label %exit.unr-lcssa.loopexit.1, !llvm.loop !12 -; EPILOG-BLOCK: exit.unr-lcssa.loopexit.1: -; EPILOG-BLOCK-NEXT: br label %exit.unr-lcssa.1 -; EPILOG-BLOCK: outerloop.loopexit.loopexit.1: -; EPILOG-BLOCK-NEXT: br label %outerloop.loopexit.1 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1.1, label %innerH.1, label %exit.unr-lcssa.1, !llvm.loop !12 ; EPILOG-BLOCK: exit.unr-lcssa.1: ; EPILOG-BLOCK-NEXT: br i1 false, label %innerH.epil.preheader.1, label %exit.loopexit +; EPILOG-BLOCK: outerloop.loopexit.loopexit.1: +; EPILOG-BLOCK-NEXT: br label %outerloop.loopexit.1 ; EPILOG-BLOCK: innerH.epil.preheader.1: +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 false) ; EPILOG-BLOCK-NEXT: br label %innerH.epil.1 ; EPILOG-BLOCK: innerH.epil.1: ; EPILOG-BLOCK-NEXT: br i1 false, label %outerloop.loopexit.1, label %latch.epil @@ -4581,7 +4564,7 @@ define void @test8() { ; EPILOG-BLOCK-NEXT: %1 = sub i64 99, %i ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit.unr-lcssa, label %outerloop.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %innerH.epil.preheader, label %outerloop.new ; EPILOG-BLOCK: outerloop.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %innerH @@ -4595,13 +4578,13 @@ define void @test8() { ; EPILOG-BLOCK: latch.1: ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %innerH, label %exit.unr-lcssa.loopexit, !llvm.loop !12 -; EPILOG-BLOCK: exit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %exit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %innerH, label %exit.unr-lcssa, !llvm.loop !12 ; EPILOG-BLOCK: exit.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %innerH.epil.preheader, label %exit.loopexit ; EPILOG-BLOCK: innerH.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %innerH.epil ; EPILOG-BLOCK: innerH.epil: ; EPILOG-BLOCK-NEXT: br i1 false, label %outerloop.loopexit, label %latch.epil @@ -4788,7 +4771,7 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-NEXT: %2 = add i32 %1, -1 ; EPILOG-NEXT: %xtraiter = and i32 %1, 7 ; EPILOG-NEXT: %3 = icmp ult i32 %2, 7 -; EPILOG-NEXT: br i1 %3, label %outerLatch.loopexit.unr-lcssa, label %preheader.new +; EPILOG-NEXT: br i1 %3, label %header.epil.preheader, label %preheader.new ; EPILOG: preheader.new: ; EPILOG-NEXT: %unroll_iter = sub i32 %1, %xtraiter ; EPILOG-NEXT: br label %header @@ -4799,11 +4782,11 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG: innerexit.loopexit: ; EPILOG-NEXT: %trip.lcssa.ph = phi i32 [ %trip, %header ], [ %trip, %latch ], [ %trip, %latch.1 ], [ %trip, %latch.2 ], [ %trip, %latch.3 ], [ %trip, %latch.4 ], [ %trip, %latch.5 
], [ %trip, %latch.6 ] ; EPILOG-NEXT: br label %innerexit -; EPILOG: innerexit.loopexit1: -; EPILOG-NEXT: %trip.lcssa.ph2 = phi i32 [ %trip, %header.epil ] +; EPILOG: innerexit.loopexit2: +; EPILOG-NEXT: %trip.lcssa.ph3 = phi i32 [ %trip, %header.epil ] ; EPILOG-NEXT: br label %innerexit ; EPILOG: innerexit: -; EPILOG-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ] +; EPILOG-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph3, %innerexit.loopexit2 ] ; EPILOG-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa) ; EPILOG-NEXT: ret ptr addrspace(1) %i9 ; EPILOG: latch: @@ -4824,21 +4807,21 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-NEXT: %iv.next.7 = add nuw nsw i64 %phi, 8 ; EPILOG-NEXT: %niter.next.7 = add i32 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i32 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label %outerLatch.loopexit.unr-lcssa.loopexit -; EPILOG: outerLatch.loopexit.unr-lcssa.loopexit: -; EPILOG-NEXT: %phi.unr.ph = phi i64 [ %iv.next.7, %latch.7 ] -; EPILOG-NEXT: br label %outerLatch.loopexit.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %header, label %outerLatch.loopexit.unr-lcssa ; EPILOG: outerLatch.loopexit.unr-lcssa: -; EPILOG-NEXT: %phi.unr = phi i64 [ %i4, %preheader ], [ %phi.unr.ph, %outerLatch.loopexit.unr-lcssa.loopexit ] +; EPILOG-NEXT: %phi.unr = phi i64 [ %iv.next.7, %latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i32 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %outerLatch.loopexit ; EPILOG: header.epil.preheader: +; EPILOG-NEXT: %phi.epil.init = phi i64 [ %i4, %preheader ], [ %phi.unr, %outerLatch.loopexit.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i32 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %header.epil ; EPILOG: header.epil: -; EPILOG-NEXT: %phi.epil = phi i64 [ %phi.unr, %header.epil.preheader ], [ %iv.next.epil, %latch.epil ] +; EPILOG-NEXT: %phi.epil = phi i64 [ %phi.epil.init, %header.epil.preheader ], [ %iv.next.epil, %latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i32 [ 0, %header.epil.preheader ], [ %epil.iter.next, %latch.epil ] ; EPILOG-NEXT: %i7.epil = trunc i64 %phi.epil to i32 -; EPILOG-NEXT: br i1 true, label %latch.epil, label %innerexit.loopexit1 +; EPILOG-NEXT: br i1 true, label %latch.epil, label %innerexit.loopexit2 ; EPILOG: latch.epil: ; EPILOG-NEXT: %i11.epil = add nsw i32 %i7.epil, 1 ; EPILOG-NEXT: %innercnd.epil = icmp slt i32 %i11.epil, %trip @@ -4866,7 +4849,7 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-BLOCK-NEXT: %2 = add i32 %1, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i32 %1, 1 ; EPILOG-BLOCK-NEXT: %3 = icmp ult i32 %2, 1 -; EPILOG-BLOCK-NEXT: br i1 %3, label %outerLatch.loopexit.unr-lcssa, label %preheader.new +; EPILOG-BLOCK-NEXT: br i1 %3, label %header.epil.preheader, label %preheader.new ; EPILOG-BLOCK: preheader.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i32 %1, %xtraiter ; EPILOG-BLOCK-NEXT: br label %header @@ -4877,17 +4860,17 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-BLOCK: innerexit.loopexit.loopexit: ; EPILOG-BLOCK-NEXT: %trip.lcssa.ph.ph = phi i32 [ %trip, %latch ], [ %trip, %header ] ; EPILOG-BLOCK-NEXT: br label %innerexit.loopexit -; EPILOG-BLOCK: innerexit.loopexit.loopexit4: -; EPILOG-BLOCK-NEXT: %trip.lcssa.ph.ph5 = phi i32 [ %trip.1, %latch.13 ], [ %trip.1, 
%header.1 ] +; EPILOG-BLOCK: innerexit.loopexit.loopexit5: +; EPILOG-BLOCK-NEXT: %trip.lcssa.ph.ph6 = phi i32 [ %trip.1, %latch.14 ], [ %trip.1, %header.1 ] ; EPILOG-BLOCK-NEXT: br label %innerexit.loopexit ; EPILOG-BLOCK: innerexit.loopexit: -; EPILOG-BLOCK-NEXT: %trip.lcssa.ph = phi i32 [ %trip.lcssa.ph.ph, %innerexit.loopexit.loopexit ], [ %trip.lcssa.ph.ph5, %innerexit.loopexit.loopexit4 ] +; EPILOG-BLOCK-NEXT: %trip.lcssa.ph = phi i32 [ %trip.lcssa.ph.ph, %innerexit.loopexit.loopexit ], [ %trip.lcssa.ph.ph6, %innerexit.loopexit.loopexit5 ] ; EPILOG-BLOCK-NEXT: br label %innerexit -; EPILOG-BLOCK: innerexit.loopexit1: -; EPILOG-BLOCK-NEXT: %trip.lcssa.ph2 = phi i32 [ %trip, %header.epil ], [ %trip.1, %header.epil.1 ] +; EPILOG-BLOCK: innerexit.loopexit2: +; EPILOG-BLOCK-NEXT: %trip.lcssa.ph3 = phi i32 [ %trip, %header.epil ], [ %trip.1, %header.epil.1 ] ; EPILOG-BLOCK-NEXT: br label %innerexit ; EPILOG-BLOCK: innerexit: -; EPILOG-BLOCK-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph2, %innerexit.loopexit1 ] +; EPILOG-BLOCK-NEXT: %trip.lcssa = phi i32 [ %trip.lcssa.ph, %innerexit.loopexit ], [ %trip.lcssa.ph3, %innerexit.loopexit2 ] ; EPILOG-BLOCK-NEXT: %i9 = call ptr addrspace(1) @foo(i32 %trip.lcssa) ; EPILOG-BLOCK-NEXT: ret ptr addrspace(1) %i9 ; EPILOG-BLOCK: latch: @@ -4896,16 +4879,16 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-BLOCK-NEXT: %iv.next.1 = add nuw nsw i64 %phi, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i32 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i32 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %outerLatch.loopexit.unr-lcssa.loopexit, !llvm.loop !14 -; EPILOG-BLOCK: outerLatch.loopexit.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %outerLatch.loopexit.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %header, label %outerLatch.loopexit.unr-lcssa, !llvm.loop !14 ; EPILOG-BLOCK: outerLatch.loopexit.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i32 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %header.epil.preheader, label %outerLatch.loopexit ; EPILOG-BLOCK: header.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i32 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %header.epil ; EPILOG-BLOCK: header.epil: -; EPILOG-BLOCK-NEXT: br i1 true, label %latch.epil, label %innerexit.loopexit1 +; EPILOG-BLOCK-NEXT: br i1 true, label %latch.epil, label %innerexit.loopexit2 ; EPILOG-BLOCK: latch.epil: ; EPILOG-BLOCK-NEXT: br label %outerLatch.loopexit ; EPILOG-BLOCK: outerLatch.loopexit: @@ -4919,30 +4902,30 @@ define ptr addrspace(1) @test9(ptr nocapture readonly %arg, i32 %n) { ; EPILOG-BLOCK-NEXT: %5 = add i32 %4, -1 ; EPILOG-BLOCK-NEXT: %xtraiter.1 = and i32 %4, 1 ; EPILOG-BLOCK-NEXT: %6 = icmp ult i32 %5, 1 -; EPILOG-BLOCK-NEXT: br i1 %6, label %outerLatch.loopexit.unr-lcssa.1, label %preheader.new.1 +; EPILOG-BLOCK-NEXT: br i1 %6, label %header.epil.preheader.1, label %preheader.new.1 ; EPILOG-BLOCK: preheader.new.1: ; EPILOG-BLOCK-NEXT: %unroll_iter.1 = sub i32 %4, %xtraiter.1 ; EPILOG-BLOCK-NEXT: br label %header.1 ; EPILOG-BLOCK: header.1: ; EPILOG-BLOCK-NEXT: %phi.1 = phi i64 [ 0, %preheader.new.1 ], [ %iv.next.1.1, %latch.1.1 ] ; EPILOG-BLOCK-NEXT: %niter.1 = phi i32 [ 0, %preheader.new.1 ], [ %niter.next.1.1, %latch.1.1 ] -; EPILOG-BLOCK-NEXT: br i1 true, label %latch.13, label %innerexit.loopexit.loopexit4 -; EPILOG-BLOCK: latch.13: -; 
EPILOG-BLOCK-NEXT: br i1 true, label %latch.1.1, label %innerexit.loopexit.loopexit4 +; EPILOG-BLOCK-NEXT: br i1 true, label %latch.14, label %innerexit.loopexit.loopexit5 +; EPILOG-BLOCK: latch.14: +; EPILOG-BLOCK-NEXT: br i1 true, label %latch.1.1, label %innerexit.loopexit.loopexit5 ; EPILOG-BLOCK: latch.1.1: ; EPILOG-BLOCK-NEXT: %iv.next.1.1 = add nuw nsw i64 %phi.1, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1.1 = add i32 %niter.1, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1.1 = icmp ne i32 %niter.next.1.1, %unroll_iter.1 -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1.1, label %header.1, label %outerLatch.loopexit.unr-lcssa.loopexit.1, !llvm.loop !14 -; EPILOG-BLOCK: outerLatch.loopexit.unr-lcssa.loopexit.1: -; EPILOG-BLOCK-NEXT: br label %outerLatch.loopexit.unr-lcssa.1 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1.1, label %header.1, label %outerLatch.loopexit.unr-lcssa.1, !llvm.loop !14 ; EPILOG-BLOCK: outerLatch.loopexit.unr-lcssa.1: ; EPILOG-BLOCK-NEXT: %lcmp.mod.1 = icmp ne i32 %xtraiter.1, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod.1, label %header.epil.preheader.1, label %outerLatch.loopexit.1 ; EPILOG-BLOCK: header.epil.preheader.1: +; EPILOG-BLOCK-NEXT: %lcmp.mod1.1 = icmp ne i32 %xtraiter.1, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1.1) ; EPILOG-BLOCK-NEXT: br label %header.epil.1 ; EPILOG-BLOCK: header.epil.1: -; EPILOG-BLOCK-NEXT: br i1 true, label %latch.epil.1, label %innerexit.loopexit1 +; EPILOG-BLOCK-NEXT: br i1 true, label %latch.epil.1, label %innerexit.loopexit2 ; EPILOG-BLOCK: latch.epil.1: ; EPILOG-BLOCK-NEXT: br label %outerLatch.loopexit.1 ; EPILOG-BLOCK: outerLatch.loopexit.1: @@ -5171,7 +5154,7 @@ define void @test10(i64 %trip, i64 %trip2) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit2.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -5220,28 +5203,28 @@ define void @test10(i64 %trip, i64 %trip2) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.unr-lcssa.loopexit +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.unr-lcssa ; EPILOG: exit1.loopexit: ; EPILOG-NEXT: br label %exit1 -; EPILOG: exit1.loopexit1: +; EPILOG: exit1.loopexit2: ; EPILOG-NEXT: br label %exit1 ; EPILOG: exit1: ; EPILOG-NEXT: ret void -; EPILOG: exit2.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit2.unr-lcssa ; EPILOG: exit2.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit2.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2 ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit2.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi 
i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: call void @bar() ; EPILOG-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil, %trip2 -; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_latch.epil, label %exit1.loopexit1 +; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_latch.epil, label %exit1.loopexit2 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip @@ -5259,7 +5242,7 @@ define void @test10(i64 %trip, i64 %trip2) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit2.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -5278,23 +5261,23 @@ define void @test10(i64 %trip, i64 %trip2) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.unr-lcssa.loopexit, !llvm.loop !16 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.unr-lcssa, !llvm.loop !16 ; EPILOG-BLOCK: exit1.loopexit: ; EPILOG-BLOCK-NEXT: br label %exit1 ; EPILOG-BLOCK: exit1: ; EPILOG-BLOCK-NEXT: ret void -; EPILOG-BLOCK: exit2.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %iv.unr.ph = phi i64 [ %iv_next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: br label %exit2.unr-lcssa ; EPILOG-BLOCK: exit2.unr-lcssa: -; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit2.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ %iv_next.1, %loop_latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2 ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit2.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: call void @bar() -; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.unr, %trip2 +; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil.init, %trip2 ; EPILOG-BLOCK-NEXT: br i1 %cmp_early.epil, label %loop_latch.epil, label %exit1 ; EPILOG-BLOCK: loop_latch.epil: ; EPILOG-BLOCK-NEXT: br label %exit2 @@ -5460,7 +5443,7 @@ define void @test11(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit2.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -5494,27 +5477,27 @@ define void @test11(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, 
label %loop_header, label %exit2.unr-lcssa.loopexit +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit2.unr-lcssa ; EPILOG: exit1.loopexit: ; EPILOG-NEXT: br label %exit1 -; EPILOG: exit1.loopexit1: +; EPILOG: exit1.loopexit2: ; EPILOG-NEXT: br label %exit1 ; EPILOG: exit1: ; EPILOG-NEXT: ret void -; EPILOG: exit2.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit2.unr-lcssa ; EPILOG: exit2.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit2.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2 ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit2.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: call void @bar() -; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %exit1.loopexit1 +; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %exit1.loopexit2 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip @@ -5532,7 +5515,7 @@ define void @test11(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit2.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -5548,17 +5531,17 @@ define void @test11(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.unr-lcssa.loopexit, !llvm.loop !17 +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit2.unr-lcssa, !llvm.loop !17 ; EPILOG-BLOCK: exit1.loopexit: ; EPILOG-BLOCK-NEXT: br label %exit1 ; EPILOG-BLOCK: exit1: ; EPILOG-BLOCK-NEXT: ret void -; EPILOG-BLOCK: exit2.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %exit2.unr-lcssa ; EPILOG-BLOCK: exit2.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit2 ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: call void @bar() @@ -5706,7 +5689,7 @@ define void @test12(i64 %trip, i64 %trip2, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; 
EPILOG-NEXT: br i1 %2, label %exit1.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -5771,33 +5754,33 @@ define void @test12(i64 %trip, i64 %trip2, i1 %cond) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa.loopexit -; EPILOG: exit1.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit1.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa ; EPILOG: exit1.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit1.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit1.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: call void @bar() ; EPILOG-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil, %trip2 -; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_exiting_bb2.epil: -; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %cond, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit1, !llvm.loop !16 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit2, !llvm.loop !16 ; EPILOG: exit1.epilog-lcssa.loopexit: ; EPILOG-NEXT: br label %exit1.epilog-lcssa -; EPILOG: exit1.epilog-lcssa.loopexit1: +; EPILOG: exit1.epilog-lcssa.loopexit2: ; EPILOG-NEXT: br label %exit1.epilog-lcssa ; EPILOG: exit1.epilog-lcssa: ; EPILOG-NEXT: br label %exit1 @@ -5810,7 +5793,7 @@ define void @test12(i64 %trip, i64 %trip2, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit1.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -5833,19 
+5816,19 @@ define void @test12(i64 %trip, i64 %trip2, i1 %cond) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa.loopexit, !llvm.loop !18 -; EPILOG-BLOCK: exit1.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %iv.unr.ph = phi i64 [ %iv_next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: br label %exit1.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa, !llvm.loop !18 ; EPILOG-BLOCK: exit1.unr-lcssa: -; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit1.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ %iv_next.1, %loop_latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit1.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: call void @bar() -; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.unr, %trip2 +; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil.init, %trip2 ; EPILOG-BLOCK-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa ; EPILOG-BLOCK: loop_exiting_bb2.epil: ; EPILOG-BLOCK-NEXT: br i1 %cond, label %loop_latch.epil, label %exit1.epilog-lcssa @@ -6038,7 +6021,7 @@ define void @test13(i64 %trip, i64 %trip2) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit1.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -6111,34 +6094,34 @@ define void @test13(i64 %trip, i64 %trip2) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa.loopexit -; EPILOG: exit1.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit1.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa ; EPILOG: exit1.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit1.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit1.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: 
%epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: call void @bar() ; EPILOG-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil, %trip2 -; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_exiting_bb2.epil: ; EPILOG-NEXT: %unknown.epil = call i1 @unknown_cond() -; EPILOG-NEXT: br i1 %unknown.epil, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %unknown.epil, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit1, !llvm.loop !17 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit2, !llvm.loop !17 ; EPILOG: exit1.epilog-lcssa.loopexit: ; EPILOG-NEXT: br label %exit1.epilog-lcssa -; EPILOG: exit1.epilog-lcssa.loopexit1: +; EPILOG: exit1.epilog-lcssa.loopexit2: ; EPILOG-NEXT: br label %exit1.epilog-lcssa ; EPILOG: exit1.epilog-lcssa: ; EPILOG-NEXT: br label %exit1 @@ -6151,7 +6134,7 @@ define void @test13(i64 %trip, i64 %trip2) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit1.unr-lcssa, label %entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -6176,19 +6159,19 @@ define void @test13(i64 %trip, i64 %trip2) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa.loopexit, !llvm.loop !19 -; EPILOG-BLOCK: exit1.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: %iv.unr.ph = phi i64 [ %iv_next.1, %loop_latch.1 ] -; EPILOG-BLOCK-NEXT: br label %exit1.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa, !llvm.loop !19 ; EPILOG-BLOCK: exit1.unr-lcssa: -; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit1.unr-lcssa.loopexit ] +; EPILOG-BLOCK-NEXT: %iv.unr = phi i64 [ %iv_next.1, %loop_latch.1 ] ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit1.unr-lcssa ] +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: call void @bar() -; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.unr, %trip2 +; EPILOG-BLOCK-NEXT: %cmp_early.epil = icmp ne i64 %iv.epil.init, %trip2 ; EPILOG-BLOCK-NEXT: br i1 %cmp_early.epil, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa ; EPILOG-BLOCK: 
loop_exiting_bb2.epil: ; EPILOG-BLOCK-NEXT: %unknown.epil = call i1 @unknown_cond() @@ -6393,7 +6376,7 @@ define void @test14(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %1 = add i64 %0, -1 ; EPILOG-NEXT: %xtraiter = and i64 %0, 7 ; EPILOG-NEXT: %2 = icmp ult i64 %1, 7 -; EPILOG-NEXT: br i1 %2, label %exit1.unr-lcssa, label %entry.new +; EPILOG-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG: entry.new: ; EPILOG-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-NEXT: br label %loop_header @@ -6451,33 +6434,33 @@ define void @test14(i64 %trip, i1 %cond) { ; EPILOG-NEXT: %iv_next.7 = add i64 %iv, 8 ; EPILOG-NEXT: %niter.next.7 = add i64 %niter, 8 ; EPILOG-NEXT: %niter.ncmp.7 = icmp ne i64 %niter.next.7, %unroll_iter -; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa.loopexit -; EPILOG: exit1.unr-lcssa.loopexit: -; EPILOG-NEXT: %iv.unr.ph = phi i64 [ %iv_next.7, %loop_latch.7 ] -; EPILOG-NEXT: br label %exit1.unr-lcssa +; EPILOG-NEXT: br i1 %niter.ncmp.7, label %loop_header, label %exit1.unr-lcssa ; EPILOG: exit1.unr-lcssa: -; EPILOG-NEXT: %iv.unr = phi i64 [ 0, %entry ], [ %iv.unr.ph, %exit1.unr-lcssa.loopexit ] +; EPILOG-NEXT: %iv.unr = phi i64 [ %iv_next.7, %loop_latch.7 ] ; EPILOG-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG: loop_header.epil.preheader: +; EPILOG-NEXT: %iv.epil.init = phi i64 [ 0, %entry ], [ %iv.unr, %exit1.unr-lcssa ] +; EPILOG-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-NEXT: br label %loop_header.epil ; EPILOG: loop_header.epil: -; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.unr, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] +; EPILOG-NEXT: %iv.epil = phi i64 [ %iv.epil.init, %loop_header.epil.preheader ], [ %iv_next.epil, %loop_latch.epil ] ; EPILOG-NEXT: %epil.iter = phi i64 [ 0, %loop_header.epil.preheader ], [ %epil.iter.next, %loop_latch.epil ] ; EPILOG-NEXT: call void @bar() -; EPILOG-NEXT: br i1 %cond, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %cond, label %loop_exiting_bb2.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_exiting_bb2.epil: ; EPILOG-NEXT: %unknown.epil = call i1 @unknown_cond() -; EPILOG-NEXT: br i1 %unknown.epil, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit1 +; EPILOG-NEXT: br i1 %unknown.epil, label %loop_latch.epil, label %exit1.epilog-lcssa.loopexit2 ; EPILOG: loop_latch.epil: ; EPILOG-NEXT: %iv_next.epil = add i64 %iv.epil, 1 ; EPILOG-NEXT: %cmp.epil = icmp ne i64 %iv_next.epil, %trip ; EPILOG-NEXT: %epil.iter.next = add i64 %epil.iter, 1 ; EPILOG-NEXT: %epil.iter.cmp = icmp ne i64 %epil.iter.next, %xtraiter -; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit1, !llvm.loop !18 +; EPILOG-NEXT: br i1 %epil.iter.cmp, label %loop_header.epil, label %exit1.epilog-lcssa.loopexit2, !llvm.loop !18 ; EPILOG: exit1.epilog-lcssa.loopexit: ; EPILOG-NEXT: br label %exit1.epilog-lcssa -; EPILOG: exit1.epilog-lcssa.loopexit1: +; EPILOG: exit1.epilog-lcssa.loopexit2: ; EPILOG-NEXT: br label %exit1.epilog-lcssa ; EPILOG: exit1.epilog-lcssa: ; EPILOG-NEXT: br label %exit1 @@ -6490,7 +6473,7 @@ define void @test14(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %1 = add i64 %0, -1 ; EPILOG-BLOCK-NEXT: %xtraiter = and i64 %0, 1 ; EPILOG-BLOCK-NEXT: %2 = icmp ult i64 %1, 1 -; EPILOG-BLOCK-NEXT: br i1 %2, label %exit1.unr-lcssa, label 
%entry.new +; EPILOG-BLOCK-NEXT: br i1 %2, label %loop_header.epil.preheader, label %entry.new ; EPILOG-BLOCK: entry.new: ; EPILOG-BLOCK-NEXT: %unroll_iter = sub i64 %0, %xtraiter ; EPILOG-BLOCK-NEXT: br label %loop_header @@ -6512,13 +6495,13 @@ define void @test14(i64 %trip, i1 %cond) { ; EPILOG-BLOCK-NEXT: %iv_next.1 = add i64 %iv, 2 ; EPILOG-BLOCK-NEXT: %niter.next.1 = add i64 %niter, 2 ; EPILOG-BLOCK-NEXT: %niter.ncmp.1 = icmp ne i64 %niter.next.1, %unroll_iter -; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa.loopexit, !llvm.loop !20 -; EPILOG-BLOCK: exit1.unr-lcssa.loopexit: -; EPILOG-BLOCK-NEXT: br label %exit1.unr-lcssa +; EPILOG-BLOCK-NEXT: br i1 %niter.ncmp.1, label %loop_header, label %exit1.unr-lcssa, !llvm.loop !20 ; EPILOG-BLOCK: exit1.unr-lcssa: ; EPILOG-BLOCK-NEXT: %lcmp.mod = icmp ne i64 %xtraiter, 0 ; EPILOG-BLOCK-NEXT: br i1 %lcmp.mod, label %loop_header.epil.preheader, label %exit1 ; EPILOG-BLOCK: loop_header.epil.preheader: +; EPILOG-BLOCK-NEXT: %lcmp.mod1 = icmp ne i64 %xtraiter, 0 +; EPILOG-BLOCK-NEXT: call void @llvm.assume(i1 %lcmp.mod1) ; EPILOG-BLOCK-NEXT: br label %loop_header.epil ; EPILOG-BLOCK: loop_header.epil: ; EPILOG-BLOCK-NEXT: call void @bar() diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll index 8acf74a..492de06 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll @@ -22,7 +22,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; EPILOG: br i1 %cmp1, label %for.end, label %for.body.preheader, !prof [[EPILOG_PROF_0:![0-9]+]] ; EPILOG: for.body.preheader: ; EPILOG: %xtraiter = and i32 %n -; EPILOG: br i1 %1, label %for.end.loopexit.unr-lcssa, label %for.body.preheader.new, !prof [[EPILOG_PROF_1:![0-9]+]] +; EPILOG: br i1 %1, label %for.body.epil.preheader, label %for.body.preheader.new, !prof [[EPILOG_PROF_1:![0-9]+]] ; EPILOG: for.end.loopexit.unr-lcssa: ; EPILOG: %lcmp.mod = icmp ne i32 %xtraiter, 0 @@ -41,7 +41,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; NOPROLOG-NOT: %xtraiter = and i32 %n ; EPILOG: for.body.epil: -; EPILOG: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %for.body.epil ], [ %indvars.iv.unr, %for.body.epil.preheader ] +; EPILOG: %indvars.iv.epil = phi i64 [ %indvars.iv.next.epil, %for.body.epil ], [ %indvars.iv.epil.init, %for.body.epil.preheader ] ; EPILOG: %epil.iter.next = add i32 %epil.iter, 1 ; EPILOG: %epil.iter.cmp = icmp ne i32 %epil.iter.next, %xtraiter ; EPILOG: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !prof [[EPILOG_PROF_3:![0-9]+]], !llvm.loop [[EPILOG_LOOP:![0-9]+]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll index 492ddd1..0eeb3ad 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll @@ -8,9 +8,9 @@ ; EPILOG: for.body.preheader: -; EPILOG: br i1 %1, label %for.end.loopexit.unr-lcssa, label %for.body.preheader.new, !dbg [[PH_LOC:![0-9]+]] +; EPILOG: br i1 %1, label %for.body.epil.preheader, label %for.body.preheader.new, !dbg [[PH_LOC:![0-9]+]] ; EPILOG: for.body: -; EPILOG: br i1 %niter.ncmp.1, label %for.end.loopexit.unr-lcssa.loopexit, label %for.body, !dbg [[PH_LOC]] +; EPILOG: br i1 %niter.ncmp.1, label %for.end.loopexit.unr-lcssa, label %for.body, !dbg [[PH_LOC]] ; EPILOG-NOT: br i1 %niter.ncmp.2, label 
%for.end.loopexit{{.*}}, label %for.body ; EPILOG: for.body.epil.preheader: ; EPILOG: br label %for.body.epil, !dbg [[PH_LOC]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll index 0e11fff..a573de2 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll @@ -8,8 +8,8 @@ ; This test makes sure we're not unrolling 'odd' counts ; EPILOG: for.body: -; EPILOG: br i1 %niter.ncmp.3, label %for.end.loopexit.unr-lcssa.loopexit{{.*}}, label %for.body -; EPILOG-NOT: br i1 %niter.ncmp.4, label %for.end.loopexit.unr-lcssa.loopexit{{.*}}, label %for.body +; EPILOG: br i1 %niter.ncmp.3, label %for.end.loopexit.unr-lcssa{{.*}}, label %for.body +; EPILOG-NOT: br i1 %niter.ncmp.4, label %for.end.loopexit.unr-lcssa{{.*}}, label %for.body ; EPILOG: for.body.epil: ; PROLOG: for.body.prol: diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop5.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop5.ll index fa9f902..0cee4e2 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-loop5.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-loop5.ll @@ -69,7 +69,7 @@ define i3 @test(ptr %a, i3 %n) { ; UNROLL-4-NEXT: [[TMP0:%.*]] = add i3 [[N]], -1 ; UNROLL-4-NEXT: [[XTRAITER:%.*]] = and i3 [[N]], 3 ; UNROLL-4-NEXT: [[TMP1:%.*]] = icmp ult i3 [[TMP0]], 3 -; UNROLL-4-NEXT: br i1 [[TMP1]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] +; UNROLL-4-NEXT: br i1 [[TMP1]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] ; UNROLL-4: for.body.preheader.new: ; UNROLL-4-NEXT: [[UNROLL_ITER:%.*]] = sub i3 [[N]], [[XTRAITER]] ; UNROLL-4-NEXT: br label [[FOR_BODY:%.*]] @@ -95,23 +95,22 @@ define i3 @test(ptr %a, i3 %n) { ; UNROLL-4-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV]], 4 ; UNROLL-4-NEXT: [[NITER_NEXT_3]] = add i3 [[NITER]], -4 ; UNROLL-4-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i3 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; UNROLL-4-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; UNROLL-4: for.end.loopexit.unr-lcssa.loopexit: +; UNROLL-4-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; UNROLL-4: for.end.loopexit.unr-lcssa: ; UNROLL-4-NEXT: [[ADD_LCSSA_PH_PH:%.*]] = phi i3 [ [[ADD_3]], [[FOR_BODY]] ] ; UNROLL-4-NEXT: [[INDVARS_IV_UNR_PH:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_3]], [[FOR_BODY]] ] ; UNROLL-4-NEXT: [[SUM_02_UNR_PH:%.*]] = phi i3 [ [[ADD_3]], [[FOR_BODY]] ] -; UNROLL-4-NEXT: br label [[FOR_END_LOOPEXIT_UNR_LCSSA]] -; UNROLL-4: for.end.loopexit.unr-lcssa: -; UNROLL-4-NEXT: [[ADD_LCSSA_PH:%.*]] = phi i3 [ poison, [[FOR_BODY_PREHEADER]] ], [ [[ADD_LCSSA_PH_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; UNROLL-4-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_UNR_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; UNROLL-4-NEXT: [[SUM_02_UNR:%.*]] = phi i3 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[SUM_02_UNR_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] ; UNROLL-4-NEXT: [[LCMP_MOD:%.*]] = icmp ne i3 [[XTRAITER]], 0 -; UNROLL-4-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_END_LOOPEXIT:%.*]] +; UNROLL-4-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER]], label [[FOR_END_LOOPEXIT:%.*]] ; UNROLL-4: for.body.epil.preheader: +; UNROLL-4-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ 
[[INDVARS_IV_UNR_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA]] ] +; UNROLL-4-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i3 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[SUM_02_UNR_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA]] ] +; UNROLL-4-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i3 [[XTRAITER]], 0 +; UNROLL-4-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; UNROLL-4-NEXT: br label [[FOR_BODY_EPIL:%.*]] ; UNROLL-4: for.body.epil: -; UNROLL-4-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[INDVARS_IV_UNR]], [[FOR_BODY_EPIL_PREHEADER]] ] -; UNROLL-4-NEXT: [[SUM_02_EPIL:%.*]] = phi i3 [ [[ADD_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[SUM_02_UNR]], [[FOR_BODY_EPIL_PREHEADER]] ] +; UNROLL-4-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[INDVARS_IV_EPIL_INIT]], [[FOR_BODY_EPIL_PREHEADER]] ] +; UNROLL-4-NEXT: [[SUM_02_EPIL:%.*]] = phi i3 [ [[ADD_EPIL:%.*]], [[FOR_BODY_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[FOR_BODY_EPIL_PREHEADER]] ] ; UNROLL-4-NEXT: [[EPIL_ITER:%.*]] = phi i3 [ 0, [[FOR_BODY_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[FOR_BODY_EPIL]] ] ; UNROLL-4-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i3, ptr [[A]], i64 [[INDVARS_IV_EPIL]] ; UNROLL-4-NEXT: [[TMP6:%.*]] = load i3, ptr [[ARRAYIDX_EPIL]], align 1 @@ -126,7 +125,7 @@ define i3 @test(ptr %a, i3 %n) { ; UNROLL-4-NEXT: [[ADD_LCSSA_PH1:%.*]] = phi i3 [ [[ADD_EPIL]], [[FOR_BODY_EPIL]] ] ; UNROLL-4-NEXT: br label [[FOR_END_LOOPEXIT]] ; UNROLL-4: for.end.loopexit: -; UNROLL-4-NEXT: [[ADD_LCSSA:%.*]] = phi i3 [ [[ADD_LCSSA_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA]] ], [ [[ADD_LCSSA_PH1]], [[FOR_END_LOOPEXIT_EPILOG_LCSSA]] ] +; UNROLL-4-NEXT: [[ADD_LCSSA:%.*]] = phi i3 [ [[ADD_LCSSA_PH_PH]], [[FOR_END_LOOPEXIT_UNR_LCSSA]] ], [ [[ADD_LCSSA_PH1]], [[FOR_END_LOOPEXIT_EPILOG_LCSSA]] ] ; UNROLL-4-NEXT: br label [[FOR_END]] ; UNROLL-4: for.end: ; UNROLL-4-NEXT: [[SUM_0_LCSSA:%.*]] = phi i3 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_END_LOOPEXIT]] ] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll b/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll index d3e5e0b..65ef3e4 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-multiexit-heuristic.ll @@ -19,7 +19,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; CHECK-NEXT: br i1 [[TMP2]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP0]], -8 ; CHECK-NEXT: br label [[HEADER:%.*]] @@ -94,20 +94,19 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; CHECK: latchexit.unr-lcssa.loopexit: -; CHECK-NEXT: br label [[LATCHEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[HEADER]] ; CHECK: latchexit.unr-lcssa: -; CHECK-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = 
phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[LATCHEXIT:%.*]], label [[HEADER_EPIL_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[LATCHEXIT:%.*]], label [[HEADER_EPIL_PREHEADER]] ; CHECK: header.epil.preheader: +; CHECK-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_7]], [[LATCHEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD3:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD3]]) ; CHECK-NEXT: br label [[HEADER_EPIL:%.*]] ; CHECK: header.epil: -; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; CHECK-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; CHECK-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ [[EPIL_ITER_NEXT:%.*]], [[LATCH_EPIL]] ], [ 0, [[HEADER_EPIL_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_EPIL:%.*]] ; CHECK: for.exiting_block.epil: @@ -124,11 +123,11 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; CHECK: latchexit.epilog-lcssa: ; CHECK-NEXT: br label [[LATCHEXIT]] ; CHECK: latchexit: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[ADD_EPIL]], [[LATCHEXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[ADD_EPIL]], [[LATCHEXIT_EPILOG_LCSSA]] ] ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] ; CHECK: otherexit.loopexit: ; CHECK-NEXT: br label [[OTHEREXIT:%.*]] -; CHECK: otherexit.loopexit3: +; CHECK: otherexit.loopexit4: ; CHECK-NEXT: br label [[OTHEREXIT]] ; CHECK: otherexit: ; CHECK-NEXT: [[SUM_02_LCSSA:%.*]] = phi i32 [ [[SUM_02]], [[OTHEREXIT_LOOPEXIT]] ], [ [[SUM_02_EPIL]], [[OTHEREXIT_LOOPEXIT3]] ] @@ -166,7 +165,7 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; ENABLED-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; ENABLED-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; ENABLED-NEXT: br i1 [[TMP2]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; ENABLED-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; ENABLED: entry.new: ; ENABLED-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP0]], [[XTRAITER]] ; ENABLED-NEXT: br label [[HEADER:%.*]] @@ -248,23 +247,22 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; ENABLED-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; ENABLED: latchexit.unr-lcssa.loopexit: +; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[HEADER]] +; ENABLED: 
latchexit.unr-lcssa: ; ENABLED-NEXT: [[SUM_0_LCSSA_PH_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[INDVARS_IV_UNR_PH:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[SUM_02_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] -; ENABLED-NEXT: br label [[LATCHEXIT_UNR_LCSSA]] -; ENABLED: latchexit.unr-lcssa: -; ENABLED-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] ; ENABLED-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[LATCHEXIT:%.*]] +; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER]], label [[LATCHEXIT:%.*]] ; ENABLED: header.epil.preheader: +; ENABLED-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[LCMP_MOD3:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; ENABLED-NEXT: call void @llvm.assume(i1 [[LCMP_MOD3]]) ; ENABLED-NEXT: br label [[HEADER_EPIL:%.*]] ; ENABLED: header.epil: -; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; ENABLED-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, [[HEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[FOR_EXITING_BLOCK_EPIL:%.*]] ; ENABLED: for.exiting_block.epil: @@ -283,12 +281,12 @@ define i32 @test1(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[SUM_0_LCSSA_PH2:%.*]] = phi i32 [ [[ADD_EPIL]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[LATCHEXIT]] ; ENABLED: latchexit: -; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH2]], [[LATCHEXIT_EPILOG_LCSSA]] ] +; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH2]], [[LATCHEXIT_EPILOG_LCSSA]] ] ; ENABLED-NEXT: ret i32 [[SUM_0_LCSSA]] ; ENABLED: otherexit.loopexit: ; ENABLED-NEXT: [[SUM_02_LCSSA_PH:%.*]] = phi i32 [ [[SUM_02]], [[FOR_EXITING_BLOCK]] ], [ [[ADD]], [[FOR_EXITING_BLOCK_1]] ], [ [[ADD_1]], [[FOR_EXITING_BLOCK_2]] ], [ [[ADD_2]], [[FOR_EXITING_BLOCK_3]] ], [ [[ADD_3]], [[FOR_EXITING_BLOCK_4]] ], [ [[ADD_4]], [[FOR_EXITING_BLOCK_5]] ], [ [[ADD_5]], [[FOR_EXITING_BLOCK_6]] ], [ [[ADD_6]], [[FOR_EXITING_BLOCK_7]] ] ; ENABLED-NEXT: br label [[OTHEREXIT:%.*]] -; ENABLED: otherexit.loopexit3: +; ENABLED: otherexit.loopexit4: ; ENABLED-NEXT: [[SUM_02_LCSSA_PH4:%.*]] = phi i32 [ [[SUM_02_EPIL]], [[FOR_EXITING_BLOCK_EPIL]] ] ; ENABLED-NEXT: br label [[OTHEREXIT]] ; ENABLED: otherexit: @@ -380,7 +378,7 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; 
ENABLED-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; ENABLED-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; ENABLED-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; ENABLED-NEXT: br i1 [[TMP2]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; ENABLED-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; ENABLED: entry.new: ; ENABLED-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP0]], [[XTRAITER]] ; ENABLED-NEXT: br label [[HEADER:%.*]] @@ -462,23 +460,22 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; ENABLED-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; ENABLED: latchexit.unr-lcssa.loopexit: +; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[HEADER]] +; ENABLED: latchexit.unr-lcssa: ; ENABLED-NEXT: [[SUM_0_LCSSA_PH_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[INDVARS_IV_UNR_PH:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[SUM_02_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] -; ENABLED-NEXT: br label [[LATCHEXIT_UNR_LCSSA]] -; ENABLED: latchexit.unr-lcssa: -; ENABLED-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] ; ENABLED-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[LATCHEXIT:%.*]] +; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER]], label [[LATCHEXIT:%.*]] ; ENABLED: header.epil.preheader: +; ENABLED-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; ENABLED-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; ENABLED-NEXT: br label [[HEADER_EPIL:%.*]] ; ENABLED: header.epil: -; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; ENABLED-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, [[HEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[FOR_EXITING_BLOCK_EPIL:%.*]] ; ENABLED: for.exiting_block.epil: @@ -497,12 +494,12 @@ define i32 @test2(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[SUM_0_LCSSA_PH1:%.*]] = phi i32 [ [[ADD_EPIL]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[LATCHEXIT]] ; ENABLED: latchexit: -; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], 
[[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH1]], [[LATCHEXIT_EPILOG_LCSSA]] ] +; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH1]], [[LATCHEXIT_EPILOG_LCSSA]] ] ; ENABLED-NEXT: ret i32 [[SUM_0_LCSSA]] ; ENABLED: otherexit.loopexit: ; ENABLED-NEXT: [[RVAL_PH:%.*]] = phi i32 [ [[SUM_02]], [[FOR_EXITING_BLOCK]] ], [ [[ADD]], [[FOR_EXITING_BLOCK_1]] ], [ [[ADD_1]], [[FOR_EXITING_BLOCK_2]] ], [ [[ADD_2]], [[FOR_EXITING_BLOCK_3]] ], [ [[ADD_3]], [[FOR_EXITING_BLOCK_4]] ], [ [[ADD_4]], [[FOR_EXITING_BLOCK_5]] ], [ [[ADD_5]], [[FOR_EXITING_BLOCK_6]] ], [ [[ADD_6]], [[FOR_EXITING_BLOCK_7]] ] ; ENABLED-NEXT: br label [[OTHEREXIT:%.*]] -; ENABLED: otherexit.loopexit2: +; ENABLED: otherexit.loopexit3: ; ENABLED-NEXT: [[RVAL_PH3:%.*]] = phi i32 [ [[SUM_02_EPIL]], [[FOR_EXITING_BLOCK_EPIL]] ] ; ENABLED-NEXT: br label [[OTHEREXIT]] ; ENABLED: otherexit: @@ -747,7 +744,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; CHECK-NEXT: br i1 [[TMP2]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[TMP0]], -8 ; CHECK-NEXT: br label [[HEADER:%.*]] @@ -822,20 +819,19 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; CHECK: latchexit.unr-lcssa.loopexit: -; CHECK-NEXT: br label [[LATCHEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[HEADER]] ; CHECK: latchexit.unr-lcssa: -; CHECK-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[LATCHEXIT:%.*]], label [[HEADER_EPIL_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[LATCHEXIT:%.*]], label [[HEADER_EPIL_PREHEADER]] ; CHECK: header.epil.preheader: +; CHECK-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_7]], [[LATCHEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD3:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD3]]) ; CHECK-NEXT: br label [[HEADER_EPIL:%.*]] ; CHECK: header.epil: -; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; CHECK-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; CHECK-NEXT: 
[[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ [[EPIL_ITER_NEXT:%.*]], [[LATCH_EPIL]] ], [ 0, [[HEADER_EPIL_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_EXITING_BLOCK_EPIL:%.*]] ; CHECK: for.exiting_block.epil: @@ -852,11 +848,11 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; CHECK: latchexit.epilog-lcssa: ; CHECK-NEXT: br label [[LATCHEXIT]] ; CHECK: latchexit: -; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[ADD_EPIL]], [[LATCHEXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[ADD_7]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[ADD_EPIL]], [[LATCHEXIT_EPILOG_LCSSA]] ] ; CHECK-NEXT: ret i32 [[SUM_0_LCSSA]] ; CHECK: otherexit.loopexit: ; CHECK-NEXT: br label [[OTHEREXIT:%.*]] -; CHECK: otherexit.loopexit3: +; CHECK: otherexit.loopexit4: ; CHECK-NEXT: br label [[OTHEREXIT]] ; CHECK: otherexit: ; CHECK-NEXT: [[SUM_02_LCSSA:%.*]] = phi i32 [ [[SUM_02]], [[OTHEREXIT_LOOPEXIT]] ], [ [[SUM_02_EPIL]], [[OTHEREXIT_LOOPEXIT3]] ] @@ -899,7 +895,7 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -1 ; ENABLED-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; ENABLED-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 7 -; ENABLED-NEXT: br i1 [[TMP2]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; ENABLED-NEXT: br i1 [[TMP2]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; ENABLED: entry.new: ; ENABLED-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[TMP0]], [[XTRAITER]] ; ENABLED-NEXT: br label [[HEADER:%.*]] @@ -981,23 +977,22 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[INDVARS_IV_NEXT_7]] = add i64 [[INDVARS_IV]], 8 ; ENABLED-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; ENABLED-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[HEADER]] -; ENABLED: latchexit.unr-lcssa.loopexit: +; ENABLED-NEXT: br i1 [[NITER_NCMP_7]], label [[LATCHEXIT_UNR_LCSSA:%.*]], label [[HEADER]] +; ENABLED: latchexit.unr-lcssa: ; ENABLED-NEXT: [[SUM_0_LCSSA_PH_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[INDVARS_IV_UNR_PH:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_7]], [[LATCH_7]] ] ; ENABLED-NEXT: [[SUM_02_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[LATCH_7]] ] -; ENABLED-NEXT: br label [[LATCHEXIT_UNR_LCSSA]] -; ENABLED: latchexit.unr-lcssa: -; ENABLED-NEXT: [[SUM_0_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] -; ENABLED-NEXT: [[SUM_02_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA_LOOPEXIT]] ] ; ENABLED-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER:%.*]], label [[LATCHEXIT:%.*]] +; ENABLED-NEXT: br i1 [[LCMP_MOD]], label [[HEADER_EPIL_PREHEADER]], label [[LATCHEXIT:%.*]] ; ENABLED: header.epil.preheader: +; ENABLED-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[SUM_02_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[SUM_02_UNR_PH]], [[LATCHEXIT_UNR_LCSSA]] ] +; ENABLED-NEXT: [[LCMP_MOD3:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; ENABLED-NEXT: call void 
@llvm.assume(i1 [[LCMP_MOD3]]) ; ENABLED-NEXT: br label [[HEADER_EPIL:%.*]] ; ENABLED: header.epil: -; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_UNR]], [[HEADER_EPIL_PREHEADER]] ] -; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_UNR]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], [[LATCH_EPIL:%.*]] ], [ [[INDVARS_IV_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] +; ENABLED-NEXT: [[SUM_02_EPIL:%.*]] = phi i32 [ [[ADD_EPIL:%.*]], [[LATCH_EPIL]] ], [ [[SUM_02_EPIL_INIT]], [[HEADER_EPIL_PREHEADER]] ] ; ENABLED-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, [[HEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[FOR_EXITING_BLOCK_EPIL:%.*]] ; ENABLED: for.exiting_block.epil: @@ -1016,13 +1011,13 @@ define i32 @test5(ptr nocapture %a, i64 %n) { ; ENABLED-NEXT: [[SUM_0_LCSSA_PH2:%.*]] = phi i32 [ [[ADD_EPIL]], [[LATCH_EPIL]] ] ; ENABLED-NEXT: br label [[LATCHEXIT]] ; ENABLED: latchexit: -; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH2]], [[LATCHEXIT_EPILOG_LCSSA]] ] +; ENABLED-NEXT: [[SUM_0_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA_PH_PH]], [[LATCHEXIT_UNR_LCSSA]] ], [ [[SUM_0_LCSSA_PH2]], [[LATCHEXIT_EPILOG_LCSSA]] ] ; ENABLED-NEXT: ret i32 [[SUM_0_LCSSA]] ; ENABLED: otherexit.loopexit: ; ENABLED-NEXT: [[SUM_02_LCSSA_PH:%.*]] = phi i32 [ [[SUM_02]], [[FOR_EXITING_BLOCK]] ], [ [[ADD]], [[FOR_EXITING_BLOCK_1]] ], [ [[ADD_1]], [[FOR_EXITING_BLOCK_2]] ], [ [[ADD_2]], [[FOR_EXITING_BLOCK_3]] ], [ [[ADD_3]], [[FOR_EXITING_BLOCK_4]] ], [ [[ADD_4]], [[FOR_EXITING_BLOCK_5]] ], [ [[ADD_5]], [[FOR_EXITING_BLOCK_6]] ], [ [[ADD_6]], [[FOR_EXITING_BLOCK_7]] ] ; ENABLED-NEXT: [[RVAL_PH:%.*]] = phi i32 [ [[SUM_02]], [[FOR_EXITING_BLOCK]] ], [ [[ADD]], [[FOR_EXITING_BLOCK_1]] ], [ [[ADD_1]], [[FOR_EXITING_BLOCK_2]] ], [ [[ADD_2]], [[FOR_EXITING_BLOCK_3]] ], [ [[ADD_3]], [[FOR_EXITING_BLOCK_4]] ], [ [[ADD_4]], [[FOR_EXITING_BLOCK_5]] ], [ [[ADD_5]], [[FOR_EXITING_BLOCK_6]] ], [ [[ADD_6]], [[FOR_EXITING_BLOCK_7]] ] ; ENABLED-NEXT: br label [[OTHEREXIT:%.*]] -; ENABLED: otherexit.loopexit3: +; ENABLED: otherexit.loopexit4: ; ENABLED-NEXT: [[SUM_02_LCSSA_PH4:%.*]] = phi i32 [ [[SUM_02_EPIL]], [[FOR_EXITING_BLOCK_EPIL]] ] ; ENABLED-NEXT: [[RVAL_PH5:%.*]] = phi i32 [ [[SUM_02_EPIL]], [[FOR_EXITING_BLOCK_EPIL]] ] ; ENABLED-NEXT: br label [[OTHEREXIT]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll index 81fceb6..73f7fd3 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-assume-no-remainder.ll @@ -91,7 +91,7 @@ define dso_local void @cannotProveDivisibleTC(ptr noalias nocapture %a, ptr noal ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]] ; CHECK: for.body.preheader.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] @@ -112,15 +112,15 @@ define dso_local void @cannotProveDivisibleTC(ptr 
noalias nocapture %a, ptr noal ; CHECK-NEXT: [[INC_1]] = add nuw nsw i32 [[I_011]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp ne i32 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]] -; CHECK: exit.loopexit.unr-lcssa.loopexit: -; CHECK-NEXT: [[I_011_UNR_PH:%.*]] = phi i32 [ [[INC_1]], [[FOR_BODY]] ] -; CHECK-NEXT: br label [[EXIT_LOOPEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[FOR_BODY]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: exit.loopexit.unr-lcssa: -; CHECK-NEXT: [[I_011_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[I_011_UNR_PH]], [[EXIT_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_011_UNR1:%.*]] = phi i32 [ [[INC_1]], [[FOR_BODY]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[EXIT_LOOPEXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[FOR_BODY_EPIL_PREHEADER]], label [[EXIT_LOOPEXIT:%.*]] ; CHECK: for.body.epil.preheader: +; CHECK-NEXT: [[I_011_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[I_011_UNR1]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]] ; CHECK: for.body.epil: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[I_011_UNR]] diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll index 0b9c6ac..a5ac2cf4 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-reductions.ll @@ -8,7 +8,7 @@ define i32 @test_add_reduction(ptr %a, i64 %n) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CHECK: [[ENTRY_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label %[[LOOP:.*]] @@ -27,28 +27,27 @@ define i32 @test_add_reduction(ptr %a, i64 %n) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] -; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]] -; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[EXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: 
[[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR1:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[BIN_RDX:%.*]] = add i32 [[RDX_NEXT_1]], [[RDX_NEXT]] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] ; CHECK: [[LOOP_EPIL]]: -; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_UNR]] +; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL_INIT]] ; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 ; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = add nuw nsw i32 [[RDX_UNR]], [[TMP4]] ; CHECK-NEXT: br label %[[EXIT]] ; CHECK: [[EXIT]]: -; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PH]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[BIN_RDX]], %[[EXIT_UNR_LCSSA]] ], [ [[RDX_NEXT_EPIL]], %[[LOOP_EPIL]] ] ; CHECK-NEXT: ret i32 [[RES]] ; entry: @@ -76,7 +75,7 @@ define i32 @test_add_reduction_constant_op(ptr %a, i64 %n) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CHECK: [[ENTRY_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label %[[LOOP:.*]] @@ -88,17 +87,16 @@ define i32 @test_add_reduction_constant_op(ptr %a, i64 %n) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP2:![0-9]+]] -; CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] -; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: [[EXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR1:%.*]] = phi i32 [ [[RDX_NEXT_1]], %[[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label 
%[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR1]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] ; CHECK: [[LOOP_EPIL]]: ; CHECK-NEXT: [[RDX_NEXT_EPIL:%.*]] = add nuw nsw i32 [[RDX_UNR]], 1 @@ -130,7 +128,7 @@ define i32 @test_add_reduction_8x_unroll(ptr %a, i64 %n) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 7 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 7 -; CHECK-NEXT: br i1 [[TMP1]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CHECK: [[ENTRY_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label %[[LOOP:.*]] @@ -172,23 +170,22 @@ define i32 @test_add_reduction_8x_unroll(ptr %a, i64 %n) { ; CHECK-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: [[EXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[RES_PH_PH:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[LOOP]] ] -; CHECK-NEXT: [[RDX_UNR_PH:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] -; CHECK-NEXT: br label %[[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[EXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[RES_PH_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[RES_PH:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[LOOP]] ] +; CHECK-NEXT: [[RDX_UNR:%.*]] = phi i32 [ [[RDX_NEXT_7]], %[[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER:.*]], label %[[EXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[LOOP_EPIL_PREHEADER]], label %[[EXIT:.*]] ; CHECK: [[LOOP_EPIL_PREHEADER]]: +; CHECK-NEXT: [[IV_EPIL_INIT:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[RDX_EPIL_INIT:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[RDX_UNR]], %[[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label %[[LOOP_EPIL:.*]] ; CHECK: [[LOOP_EPIL]]: -; CHECK-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] -; CHECK-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_UNR]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[IV_EPIL:%.*]] = phi i64 [ [[IV_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[IV_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] +; CHECK-NEXT: [[RDX_EPIL:%.*]] = phi i32 [ [[RDX_EPIL_INIT]], %[[LOOP_EPIL_PREHEADER]] ], [ [[RDX_NEXT_EPIL:%.*]], %[[LOOP_EPIL]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i64 
[ 0, %[[LOOP_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], %[[LOOP_EPIL]] ] ; CHECK-NEXT: [[GEP_A_EPIL:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV_EPIL]] ; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[GEP_A_EPIL]], align 2 diff --git a/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll b/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll index a3cfeac..5f4bbf1 100644 --- a/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll +++ b/llvm/test/Transforms/LoopUnroll/runtime-unroll-remainder.ll @@ -11,31 +11,30 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 3 ; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[N]], 4 -; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_LR_PH_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY_EPIL_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_NEW:%.*]] ; CHECK: for.body.lr.ph.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 4294967292 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup.loopexit.unr-lcssa.loopexit: -; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ; CHECK: for.cond.cleanup.loopexit.unr-lcssa: -; CHECK-NEXT: [[ADD_LCSSA_PH:%.*]] = phi i32 [ poison, [[FOR_BODY_LR_PH]] ], [ [[ADD_3:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT:%.*]] ] -; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[C_010_UNR:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[ADD_3]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY_EPIL_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY_EPIL_PREHEADER]] ; CHECK: for.body.epil.preheader: +; CHECK-NEXT: [[INDVARS_IV_EPIL_INIT:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]] ] +; CHECK-NEXT: [[C_010_EPIL_INIT:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[ADD_3:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label [[FOR_BODY_EPIL:%.*]] ; CHECK: for.body.epil: -; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_UNR]] +; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_EPIL_INIT]] ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4 -; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_UNR]] +; CHECK-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV_EPIL_INIT]] ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX2_EPIL]], align 4 ; CHECK-NEXT: [[MUL_EPIL:%.*]] = mul nsw i32 [[TMP2]], [[TMP1]] -; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nsw i32 [[MUL_EPIL]], [[C_010_UNR]] +; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nsw i32 [[MUL_EPIL]], [[C_010_EPIL_INIT]] ; CHECK-NEXT: [[EPIL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 1 ; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA:%.*]], 
label [[FOR_BODY_EPIL_1:%.*]] ; CHECK: for.body.epil.1: -; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[INDVARS_IV_EPIL_INIT]], 1 ; CHECK-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL]] ; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_1]], align 4 ; CHECK-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL]] @@ -45,7 +44,7 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) ; CHECK-NEXT: [[EPIL_ITER_CMP_1_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 2 ; CHECK-NEXT: br i1 [[EPIL_ITER_CMP_1_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]], label [[FOR_BODY_EPIL_2:%.*]] ; CHECK: for.body.epil.2: -; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_UNR]], 2 +; CHECK-NEXT: [[INDVARS_IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[INDVARS_IV_EPIL_INIT]], 2 ; CHECK-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT_EPIL_1]] ; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX_EPIL_2]], align 4 ; CHECK-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV_NEXT_EPIL_1]] @@ -57,7 +56,7 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) ; CHECK-NEXT: [[ADD_LCSSA_PH1:%.*]] = phi i32 [ [[ADD_EPIL]], [[FOR_BODY_EPIL]] ], [ [[ADD_EPIL_1]], [[FOR_BODY_EPIL_1]] ], [ [[ADD_EPIL_2]], [[FOR_BODY_EPIL_2]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP_LOOPEXIT]] ; CHECK: for.cond.cleanup.loopexit: -; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD_LCSSA_PH]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[ADD_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] +; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD_3]], [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]] ], [ [[ADD_LCSSA_PH1]], [[FOR_COND_CLEANUP_LOOPEXIT_EPILOG_LCSSA]] ] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: [[C_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] @@ -96,7 +95,7 @@ define i32 @unroll(ptr nocapture readonly %a, ptr nocapture readonly %b, i32 %N) ; CHECK-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV]], 4 ; CHECK-NEXT: [[NITER_NEXT_3]] = add i64 [[NITER]], 4 ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i64 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; entry: %cmp9 = icmp eq i32 %N, 0 diff --git a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll index 0a3d201..fd07238 100644 --- a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll +++ b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll @@ -30,7 +30,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[LD_LCSSA]], 1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[LD_LCSSA]], 7 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT2_UNR_LCSSA:%.*]], label [[ENTRY2_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP2_EPIL_PREHEADER:%.*]], label [[ENTRY2_NEW:%.*]] ; CHECK: entry2.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] 
= sub i64 [[TMP0]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP2:%.*]] @@ -40,18 +40,18 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-NEXT: [[INC_7]] = add i64 [[PHI]], 8 ; CHECK-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8 ; CHECK-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[EXIT2_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP2]] -; CHECK: exit2.unr-lcssa.loopexit: -; CHECK-NEXT: [[PHI_UNR_PH:%.*]] = phi i64 [ [[INC_7]], [[LOOP2]] ] -; CHECK-NEXT: br label [[EXIT2_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_7]], label [[EXIT2_UNR_LCSSA:%.*]], label [[LOOP2]] ; CHECK: exit2.unr-lcssa: -; CHECK-NEXT: [[PHI_UNR:%.*]] = phi i64 [ 0, [[ENTRY2]] ], [ [[PHI_UNR_PH]], [[EXIT2_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_UNR:%.*]] = phi i64 [ [[INC_7]], [[LOOP2]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP2_EPIL_PREHEADER:%.*]], label [[EXIT2:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP2_EPIL_PREHEADER]], label [[EXIT2:%.*]] ; CHECK: loop2.epil.preheader: +; CHECK-NEXT: [[PHI_EPIL_INIT:%.*]] = phi i64 [ 0, [[ENTRY2]] ], [ [[PHI_UNR]], [[EXIT2_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; CHECK-NEXT: br label [[LOOP2_EPIL:%.*]] ; CHECK: loop2.epil: -; CHECK-NEXT: [[PHI_EPIL:%.*]] = phi i64 [ [[PHI_UNR]], [[LOOP2_EPIL_PREHEADER]] ], [ [[INC_EPIL:%.*]], [[LOOP2_EPIL]] ] +; CHECK-NEXT: [[PHI_EPIL:%.*]] = phi i64 [ [[PHI_EPIL_INIT]], [[LOOP2_EPIL_PREHEADER]] ], [ [[INC_EPIL:%.*]], [[LOOP2_EPIL]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i64 [ 0, [[LOOP2_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[LOOP2_EPIL]] ] ; CHECK-NEXT: [[INC_EPIL]] = add i64 [[PHI_EPIL]], 1 ; CHECK-NEXT: [[COND2_EPIL:%.*]] = icmp eq i64 [[LD_LCSSA]], [[PHI_EPIL]] diff --git a/llvm/test/Transforms/LoopUnroll/tripcount-overflow.ll b/llvm/test/Transforms/LoopUnroll/tripcount-overflow.ll index 1481286..f839c88 100644 --- a/llvm/test/Transforms/LoopUnroll/tripcount-overflow.ll +++ b/llvm/test/Transforms/LoopUnroll/tripcount-overflow.ll @@ -17,7 +17,7 @@ define i32 @foo(i32 %N) { ; EPILOG-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], 1 ; EPILOG-NEXT: [[XTRAITER:%.*]] = and i32 [[TMP0]], 1 ; EPILOG-NEXT: [[TMP1:%.*]] = icmp ult i32 [[N]], 1 -; EPILOG-NEXT: br i1 [[TMP1]], label [[WHILE_END_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; EPILOG-NEXT: br i1 [[TMP1]], label [[WHILE_BODY_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; EPILOG: entry.new: ; EPILOG-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[TMP0]], [[XTRAITER]] ; EPILOG-NEXT: br label [[WHILE_BODY:%.*]] @@ -28,22 +28,21 @@ define i32 @foo(i32 %N) { ; EPILOG-NEXT: [[INC_1]] = add i32 [[I]], 2 ; EPILOG-NEXT: [[NITER_NEXT_1]] = add i32 [[NITER]], 2 ; EPILOG-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i32 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; EPILOG-NEXT: br i1 [[NITER_NCMP_1]], label [[WHILE_END_UNR_LCSSA_LOOPEXIT:%.*]], label [[WHILE_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; EPILOG: while.end.unr-lcssa.loopexit: +; EPILOG-NEXT: br i1 [[NITER_NCMP_1]], label [[WHILE_END_UNR_LCSSA:%.*]], label [[WHILE_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; EPILOG: while.end.unr-lcssa: ; EPILOG-NEXT: [[I_LCSSA_PH_PH:%.*]] = phi i32 [ [[INC]], [[WHILE_BODY]] ] ; EPILOG-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[INC_1]], [[WHILE_BODY]] ] -; EPILOG-NEXT: br label [[WHILE_END_UNR_LCSSA]] -; EPILOG: while.end.unr-lcssa: -; EPILOG-NEXT: [[I_LCSSA_PH:%.*]] = phi i32 [ poison, [[ENTRY:%.*]] ], [ 
[[I_LCSSA_PH_PH]], [[WHILE_END_UNR_LCSSA_LOOPEXIT]] ] -; EPILOG-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[I_UNR_PH]], [[WHILE_END_UNR_LCSSA_LOOPEXIT]] ] ; EPILOG-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; EPILOG-NEXT: br i1 [[LCMP_MOD]], label [[WHILE_BODY_EPIL_PREHEADER:%.*]], label [[WHILE_END:%.*]] +; EPILOG-NEXT: br i1 [[LCMP_MOD]], label [[WHILE_BODY_EPIL_PREHEADER]], label [[WHILE_END:%.*]] ; EPILOG: while.body.epil.preheader: +; EPILOG-NEXT: [[I_EPIL_INIT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_UNR_PH]], [[WHILE_END_UNR_LCSSA]] ] +; EPILOG-NEXT: [[LCMP_MOD2:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; EPILOG-NEXT: call void @llvm.assume(i1 [[LCMP_MOD2]]) ; EPILOG-NEXT: br label [[WHILE_BODY_EPIL:%.*]] ; EPILOG: while.body.epil: ; EPILOG-NEXT: br label [[WHILE_END]] ; EPILOG: while.end: -; EPILOG-NEXT: [[I_LCSSA:%.*]] = phi i32 [ [[I_LCSSA_PH]], [[WHILE_END_UNR_LCSSA]] ], [ [[I_UNR]], [[WHILE_BODY_EPIL]] ] +; EPILOG-NEXT: [[I_LCSSA:%.*]] = phi i32 [ [[I_LCSSA_PH_PH]], [[WHILE_END_UNR_LCSSA]] ], [ [[I_EPIL_INIT]], [[WHILE_BODY_EPIL]] ] ; EPILOG-NEXT: ret i32 [[I_LCSSA]] ; ; PROLOG-LABEL: @foo( diff --git a/llvm/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll b/llvm/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll index 20a247f..611ee5f 100644 --- a/llvm/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll +++ b/llvm/test/Transforms/LoopUnroll/unroll-heuristics-pgo.ll @@ -8,7 +8,7 @@ ; CHECK: %mul.1 = mul ; CHECK: %mul.2 = mul ; CHECK: %mul.3 = mul -; CHECK: br i1 %niter.ncmp.7, label %loop.end.unr-lcssa.loopexit, label %loop, !prof [[PROF0:![0-9]+]] +; CHECK: br i1 %niter.ncmp.7, label %loop.end.unr-lcssa, label %loop, !prof [[PROF0:![0-9]+]] ; CHECK: loop.epil: ; CHECK: br i1 %epil.iter.cmp, label %loop.epil, label %loop.end.epilog-lcssa, !prof [[PROF1:![0-9]+]], !llvm.loop {{![0-9]+}} define i32 @bar_prof(ptr noalias nocapture readonly %src, i64 %c) !prof !1 { diff --git a/llvm/test/Transforms/LoopUnroll/unroll-loads-cse.ll b/llvm/test/Transforms/LoopUnroll/unroll-loads-cse.ll index d410525..f85aac7 100644 --- a/llvm/test/Transforms/LoopUnroll/unroll-loads-cse.ll +++ b/llvm/test/Transforms/LoopUnroll/unroll-loads-cse.ll @@ -12,7 +12,7 @@ define void @cse_matching_load_from_previous_unrolled_iteration(ptr %src, ptr no ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -35,15 +35,15 @@ define void @cse_matching_load_from_previous_unrolled_iteration(ptr %src, ptr no ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; 
CHECK-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR1]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr i64, ptr [[SRC_12]], i64 [[IV_UNR]] @@ -88,7 +88,7 @@ define void @cse_different_load_types(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -115,15 +115,15 @@ define void @cse_different_load_types(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR1]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr i64, ptr [[SRC_12]], i64 [[IV_UNR]] @@ -170,7 +170,7 @@ define void @cse_volatile_loads(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -195,15 +195,15 @@ define void @cse_volatile_loads(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 
[[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR1]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr i64, ptr [[SRC_12]], i64 [[IV_UNR]] @@ -248,7 +248,7 @@ define void @cse_atomic_loads(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br i1 [[TMP1]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -273,15 +273,15 @@ define void @cse_atomic_loads(ptr %src, ptr noalias %dst, i64 %N) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR1]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr i64, ptr [[SRC_12]], i64 [[IV_UNR]] @@ -326,7 +326,7 @@ define void @cse_load_may_be_clobbered(ptr %src, ptr %dst, i64 %N) { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[N]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i64 [[TMP0]], 1 -; CHECK-NEXT: br 
i1 [[TMP1]], label [[EXIT_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP1]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[ENTRY_NEW:%.*]] ; CHECK: entry.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[N]], [[XTRAITER]] ; CHECK-NEXT: br label [[LOOP:%.*]] @@ -351,15 +351,15 @@ define void @cse_load_may_be_clobbered(ptr %src, ptr %dst, i64 %N) { ; CHECK-NEXT: [[IV_NEXT_1]] = add nuw nsw i64 [[IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA_LOOPEXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] -; CHECK: exit.unr-lcssa.loopexit: -; CHECK-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] -; CHECK-NEXT: br label [[EXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_UNR_LCSSA:%.*]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: exit.unr-lcssa: -; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR_PH]], [[EXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[IV_UNR1:%.*]] = phi i64 [ [[IV_NEXT_1]], [[LOOP]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER:%.*]], label [[EXIT:%.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label [[LOOP_EPIL_PREHEADER]], label [[EXIT:%.*]] ; CHECK: loop.epil.preheader: +; CHECK-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_UNR1]], [[EXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label [[LOOP_EPIL:%.*]] ; CHECK: loop.epil: ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr i64, ptr [[SRC_12]], i64 [[IV_UNR]] diff --git a/llvm/test/Transforms/LoopUnrollAndJam/dependencies_visit_order.ll b/llvm/test/Transforms/LoopUnrollAndJam/dependencies_visit_order.ll index f1a5adf..3510650 100644 --- a/llvm/test/Transforms/LoopUnrollAndJam/dependencies_visit_order.ll +++ b/llvm/test/Transforms/LoopUnrollAndJam/dependencies_visit_order.ll @@ -6,7 +6,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" define void @test1() { ; CHECK-LABEL: @test1( ; CHECK-NEXT: bb: -; CHECK-NEXT: br i1 false, label [[BB1_BB43_CRIT_EDGE_UNR_LCSSA:%.*]], label [[BB_NEW:%.*]] +; CHECK-NEXT: br i1 false, label [[BB5_PREHEADER_EPIL_PREHEADER:%.*]], label [[BB_NEW:%.*]] ; CHECK: bb.new: ; CHECK-NEXT: br label [[BB5_PREHEADER:%.*]] ; CHECK: bb5.preheader: @@ -30,17 +30,16 @@ define void @test1() { ; CHECK-NEXT: br i1 true, label [[BB38]], label [[BB10_PREHEADER]] ; CHECK: bb38: ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i16 [[NITER_NEXT_3]], -28 -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[BB1_BB43_CRIT_EDGE_UNR_LCSSA_LOOPEXIT:%.*]], label [[BB5_PREHEADER]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: bb1.bb43_crit_edge.unr-lcssa.loopexit: -; CHECK-NEXT: [[I10_UNR_PH:%.*]] = phi i16 [ [[I42_3]], [[BB38]] ] -; CHECK-NEXT: br label [[BB1_BB43_CRIT_EDGE_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label [[BB1_BB43_CRIT_EDGE_UNR_LCSSA:%.*]], label [[BB5_PREHEADER]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: bb1.bb43_crit_edge.unr-lcssa: -; CHECK-NEXT: [[I10_UNR:%.*]] = phi i16 [ 0, [[BB:%.*]] ], [ [[I10_UNR_PH]], [[BB1_BB43_CRIT_EDGE_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: br i1 true, label [[BB5_PREHEADER_EPIL_PREHEADER:%.*]], label [[BB1_BB43_CRIT_EDGE:%.*]] +; CHECK-NEXT: [[I10_UNR:%.*]] = phi i16 [ [[I42_3]], [[BB38]] ] +; CHECK-NEXT: br i1 true, label 
[[BB5_PREHEADER_EPIL_PREHEADER]], label [[BB1_BB43_CRIT_EDGE:%.*]] ; CHECK: bb5.preheader.epil.preheader: +; CHECK-NEXT: [[I10_EPIL_INIT:%.*]] = phi i16 [ 0, [[BB:%.*]] ], [ [[I10_UNR]], [[BB1_BB43_CRIT_EDGE_UNR_LCSSA]] ] +; CHECK-NEXT: call void @llvm.assume(i1 true) ; CHECK-NEXT: br label [[BB5_PREHEADER_EPIL:%.*]] ; CHECK: bb5.preheader.epil: -; CHECK-NEXT: [[I10_EPIL:%.*]] = phi i16 [ [[I10_UNR]], [[BB5_PREHEADER_EPIL_PREHEADER]] ], [ [[I42_EPIL:%.*]], [[BB38_EPIL:%.*]] ] +; CHECK-NEXT: [[I10_EPIL:%.*]] = phi i16 [ [[I10_EPIL_INIT]], [[BB5_PREHEADER_EPIL_PREHEADER]] ], [ [[I42_EPIL:%.*]], [[BB38_EPIL:%.*]] ] ; CHECK-NEXT: [[EPIL_ITER:%.*]] = phi i16 [ 0, [[BB5_PREHEADER_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[BB38_EPIL]] ] ; CHECK-NEXT: br label [[BB10_PREHEADER_EPIL:%.*]] ; CHECK: bb10.preheader.epil: diff --git a/llvm/test/Transforms/LoopUnrollAndJam/followup.ll b/llvm/test/Transforms/LoopUnrollAndJam/followup.ll index 5186f77..c8be48bf 100644 --- a/llvm/test/Transforms/LoopUnrollAndJam/followup.ll +++ b/llvm/test/Transforms/LoopUnrollAndJam/followup.ll @@ -52,7 +52,7 @@ for.end: ; CHECK: br i1 %exitcond.3, label %for.latch, label %for.inner, !llvm.loop ![[LOOP_INNER:[0-9]+]] -; CHECK: br i1 %niter.ncmp.3, label %for.end.loopexit.unr-lcssa.loopexit, label %for.outer, !llvm.loop ![[LOOP_OUTER:[0-9]+]] +; CHECK: br i1 %niter.ncmp.3, label %for.end.loopexit.unr-lcssa, label %for.outer, !llvm.loop ![[LOOP_OUTER:[0-9]+]] ; CHECK: br i1 %exitcond.epil, label %for.latch.epil, label %for.inner.epil, !llvm.loop ![[LOOP_REMAINDER_INNER:[0-9]+]] ; CHECK: br i1 %exitcond.epil.1, label %for.latch.epil.1, label %for.inner.epil.1, !llvm.loop ![[LOOP_REMAINDER_INNER]] ; CHECK: br i1 %exitcond.epil.2, label %for.latch.epil.2, label %for.inner.epil.2, !llvm.loop ![[LOOP_REMAINDER_INNER]] diff --git a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll index 6f48c41..9ee51cf 100644 --- a/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll +++ b/llvm/test/Transforms/LoopUnrollAndJam/unroll-and-jam.ll @@ -17,7 +17,7 @@ define void @test1(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[I]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] ; CHECK: [[FOR_OUTER_PREHEADER_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[I]], [[XTRAITER]] ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] @@ -71,15 +71,15 @@ define void @test1(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_2]] ; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP4:![0-9]+]] -; CHECK: [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[ADD8_3]], %[[FOR_LATCH]] ] -; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: 
[[FOR_END_LOOPEXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR_PH]], %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_UNR1:%.*]] = phi i32 [ [[ADD8_3]], %[[FOR_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_END_LOOPEXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_END_LOOPEXIT:.*]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR1]], %[[FOR_END_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: br label %[[FOR_INNER_EPIL:.*]] @@ -193,7 +193,7 @@ define void @test2(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[I]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_END10_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] ; CHECK: [[FOR_OUTER_PREHEADER_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[I]], [[XTRAITER]] ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] @@ -251,15 +251,15 @@ define void @test2(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: store i32 [[ADD_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX_3]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END10_LOOPEXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP6:![0-9]+]] -; CHECK: [[FOR_END10_LOOPEXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[ADD9_3]], %[[FOR_LATCH]] ] -; CHECK-NEXT: br label %[[FOR_END10_LOOPEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END10_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[FOR_END10_LOOPEXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR_PH]], %[[FOR_END10_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_UNR1:%.*]] = phi i32 [ [[ADD9_3]], %[[FOR_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_END10_LOOPEXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_END10_LOOPEXIT:.*]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR1]], %[[FOR_END10_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]] @@ -615,7 +615,7 @@ define i32 @test6() #0 { ; CHECK-LABEL: define i32 @test6() { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[F_PROMOTED10:%.*]] = load i32, ptr @f, align 4, !tbaa [[INT_TBAA0]] -; CHECK-NEXT: br i1 false, label 
%[[FOR_END_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK-NEXT: br i1 false, label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CHECK: [[ENTRY_NEW]]: ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] ; CHECK: [[FOR_OUTER]]: @@ -636,18 +636,15 @@ define i32 @test6() #0 { ; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp ne i32 [[INC_3]], 7 ; CHECK-NEXT: br i1 [[EXITCOND_3]], label %[[FOR_INNER]], label %[[FOR_LATCH]] ; CHECK: [[FOR_LATCH]]: -; CHECK-NEXT: br i1 false, label %[[FOR_OUTER]], label %[[FOR_END_UNR_LCSSA_LOOPEXIT:.*]], !llvm.loop [[LOOP7:![0-9]+]] -; CHECK: [[FOR_END_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: br i1 false, label %[[FOR_OUTER]], label %[[FOR_END_UNR_LCSSA:.*]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[FOR_END_UNR_LCSSA]]: ; CHECK-NEXT: [[DOTLCSSA_LCSSA_PH_PH:%.*]] = phi i32 [ 2, %[[FOR_LATCH]] ] ; CHECK-NEXT: [[INC_LCSSA_LCSSA_PH_PH:%.*]] = phi i32 [ 7, %[[FOR_LATCH]] ] ; CHECK-NEXT: [[P0_UNR_PH:%.*]] = phi i32 [ 2, %[[FOR_LATCH]] ] -; CHECK-NEXT: br label %[[FOR_END_UNR_LCSSA]] -; CHECK: [[FOR_END_UNR_LCSSA]]: -; CHECK-NEXT: [[DOTLCSSA_LCSSA_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[DOTLCSSA_LCSSA_PH_PH]], %[[FOR_END_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[INC_LCSSA_LCSSA_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[INC_LCSSA_LCSSA_PH_PH]], %[[FOR_END_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[P0_UNR:%.*]] = phi i32 [ [[F_PROMOTED10]], %[[ENTRY]] ], [ [[P0_UNR_PH]], %[[FOR_END_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: br i1 true, label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_END:.*]] +; CHECK-NEXT: br i1 true, label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_END:.*]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[P0_UNR:%.*]] = phi i32 [ [[F_PROMOTED10]], %[[ENTRY]] ], [ [[P0_UNR_PH]], %[[FOR_END_UNR_LCSSA]] ] +; CHECK-NEXT: call void @llvm.assume(i1 true) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: br label %[[FOR_INNER_EPIL:.*]] @@ -661,8 +658,8 @@ define i32 @test6() #0 { ; CHECK-NEXT: [[DOTLCSSA_EPIL:%.*]] = phi i32 [ [[P1_EPIL]], %[[FOR_INNER_EPIL]] ] ; CHECK-NEXT: br label %[[FOR_END]] ; CHECK: [[FOR_END]]: -; CHECK-NEXT: [[DOTLCSSA_LCSSA:%.*]] = phi i32 [ [[DOTLCSSA_LCSSA_PH]], %[[FOR_END_UNR_LCSSA]] ], [ [[DOTLCSSA_EPIL]], %[[FOR_LATCH_EPIL]] ] -; CHECK-NEXT: [[INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[INC_LCSSA_LCSSA_PH]], %[[FOR_END_UNR_LCSSA]] ], [ 7, %[[FOR_LATCH_EPIL]] ] +; CHECK-NEXT: [[DOTLCSSA_LCSSA:%.*]] = phi i32 [ [[DOTLCSSA_LCSSA_PH_PH]], %[[FOR_END_UNR_LCSSA]] ], [ [[DOTLCSSA_EPIL]], %[[FOR_LATCH_EPIL]] ] +; CHECK-NEXT: [[INC_LCSSA_LCSSA:%.*]] = phi i32 [ [[INC_LCSSA_LCSSA_PH_PH]], %[[FOR_END_UNR_LCSSA]] ], [ 7, %[[FOR_LATCH_EPIL]] ] ; CHECK-NEXT: ret i32 0 ; entry: @@ -708,7 +705,7 @@ define void @test7(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[I]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_PREHEADER_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_PREHEADER_NEW:.*]] ; CHECK: [[FOR_PREHEADER_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[I]], [[XTRAITER]] ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] @@ -747,7 +744,7 @@ define void @test7(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], ptr 
[[ARRAYIDX_3]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[FOR_INNER]]: ; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ 0, %[[FOR_OUTER]] ], [ [[ADD9]], %[[FOR_INNER]] ] ; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, %[[FOR_OUTER]] ], [ [[ADD10:%.*]], %[[FOR_INNER]] ] @@ -775,14 +772,14 @@ define void @test7(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[ADD10_3]] = add nuw i32 [[J_3]], 1 ; CHECK-NEXT: [[EXITCOND_3:%.*]] = icmp eq i32 [[ADD10_3]], [[E]] ; CHECK-NEXT: br i1 [[EXITCOND_3]], label %[[FOR_LATCH]], label %[[FOR_INNER]] -; CHECK: [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[ADD_3]], %[[FOR_LATCH]] ] -; CHECK-NEXT: br label %[[FOR_END_LOOPEXIT_UNR_LCSSA]] ; CHECK: [[FOR_END_LOOPEXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_PREHEADER]] ], [ [[I_UNR_PH]], %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_UNR1:%.*]] = phi i32 [ [[ADD_3]], %[[FOR_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_END_LOOPEXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_END_LOOPEXIT:.*]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_PREHEADER]] ], [ [[I_UNR1]], %[[FOR_END_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]] @@ -907,7 +904,7 @@ define void @test8(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[X_038:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_CLEANUP:.*]] ], [ 0, %[[FOR_PREHEADER]] ] ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[I]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_CLEANUP_UNR_LCSSA:.*]], label %[[FOR_OUTEST_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_OUTEST_NEW:.*]] ; CHECK: [[FOR_OUTEST_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[I]], [[XTRAITER]] ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] @@ -922,10 +919,10 @@ define void @test8(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD]] ; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_1]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[I]], 2 -; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]] -; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_1]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]] -; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_2]], align 4, !tbaa [[INT_TBAA0]] +; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX_2]], align 4, !tbaa [[INT_TBAA0]] +; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_1]] +; CHECK-NEXT: store i32 0, ptr [[ARRAYIDX_4]], align 4, !tbaa 
[[INT_TBAA0]] ; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[I]], 3 ; CHECK-NEXT: [[ARRAYIDX6_2:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD_2]] ; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX6_2]], align 4, !tbaa [[INT_TBAA0]] @@ -970,18 +967,18 @@ define void @test8(i32 %I, i32 %E, ptr noalias nocapture %A, ptr noalias nocaptu ; CHECK-NEXT: [[ADD9_LCSSA_3:%.*]] = phi i32 [ [[ADD9_3]], %[[FOR_INNER]] ] ; CHECK-NEXT: store i32 [[ADD9_LCSSA]], ptr [[ARRAYIDX]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: store i32 [[ADD9_LCSSA_1]], ptr [[ARRAYIDX_1]], align 4, !tbaa [[INT_TBAA0]] -; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[INT_TBAA0]] +; CHECK-NEXT: store i32 [[ADD9_LCSSA_2]], ptr [[ARRAYIDX_4]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: store i32 [[ADD9_LCSSA_3]], ptr [[ARRAYIDX_3]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_CLEANUP_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP9:![0-9]+]] -; CHECK: [[FOR_CLEANUP_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[ADD_3]], %[[FOR_LATCH]] ] -; CHECK-NEXT: br label %[[FOR_CLEANUP_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_CLEANUP_UNR_LCSSA:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[FOR_CLEANUP_UNR_LCSSA]]: -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTEST]] ], [ [[I_UNR_PH]], %[[FOR_CLEANUP_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_UNR1:%.*]] = phi i32 [ [[ADD_3]], %[[FOR_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_CLEANUP]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_CLEANUP]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTEST]] ], [ [[I_UNR1]], %[[FOR_CLEANUP_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[I_UNR]] @@ -1116,7 +1113,7 @@ define void @test9(i32 %I, i32 %E, ptr nocapture %A, ptr nocapture readonly %B) ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[I]], -1 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i32 [[I]], 3 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[TMP0]], 3 -; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] +; CHECK-NEXT: br i1 [[TMP1]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_OUTER_PREHEADER_NEW:.*]] ; CHECK: [[FOR_OUTER_PREHEADER_NEW]]: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = sub i32 [[I]], [[XTRAITER]] ; CHECK-NEXT: br label %[[FOR_OUTER:.*]] @@ -1174,15 +1171,15 @@ define void @test9(i32 %I, i32 %E, ptr nocapture %A, ptr nocapture readonly %B) ; CHECK-NEXT: [[ARRAYIDX6_3:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[ADD8_2]] ; CHECK-NEXT: store i32 [[ADD_LCSSA_3]], ptr [[ARRAYIDX6_3]], align 4, !tbaa [[INT_TBAA0]] ; CHECK-NEXT: [[NITER_NCMP_3:%.*]] = icmp eq i32 [[NITER_NEXT_3]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP12:![0-9]+]] -; CHECK: [[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]]: -; CHECK-NEXT: [[I_UNR_PH:%.*]] = phi i32 [ [[ADD8_3]], %[[FOR_LATCH]] ] -; CHECK-NEXT: 
br label %[[FOR_END_LOOPEXIT_UNR_LCSSA]] +; CHECK-NEXT: br i1 [[NITER_NCMP_3]], label %[[FOR_END_LOOPEXIT_UNR_LCSSA:.*]], label %[[FOR_OUTER]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[FOR_END_LOOPEXIT_UNR_LCSSA]]: -; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR_PH]], %[[FOR_END_LOOPEXIT_UNR_LCSSA_LOOPEXIT]] ] +; CHECK-NEXT: [[I_UNR1:%.*]] = phi i32 [ [[ADD8_3]], %[[FOR_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER:.*]], label %[[FOR_END_LOOPEXIT:.*]] +; CHECK-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_OUTER_EPIL_PREHEADER]], label %[[FOR_END_LOOPEXIT:.*]] ; CHECK: [[FOR_OUTER_EPIL_PREHEADER]]: +; CHECK-NEXT: [[I_UNR:%.*]] = phi i32 [ 0, %[[FOR_OUTER_PREHEADER]] ], [ [[I_UNR1]], %[[FOR_END_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD1:%.*]] = icmp ne i32 [[XTRAITER]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[LCMP_MOD1]]) ; CHECK-NEXT: br label %[[FOR_OUTER_EPIL:.*]] ; CHECK: [[FOR_OUTER_EPIL]]: ; CHECK-NEXT: br label %[[FOR_INNER_EPIL:.*]] @@ -1293,11 +1290,11 @@ for.end: define signext i16 @test10(i32 %k) #0 { ; CHECK-LABEL: define signext i16 @test10( ; CHECK-SAME: i32 [[K:%.*]]) { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @c, align 1 ; CHECK-NEXT: [[TOBOOL9:%.*]] = icmp eq i8 [[TMP0]], 0 ; CHECK-NEXT: [[TOBOOL13:%.*]] = icmp ne i32 [[K]], 0 -; CHECK-NEXT: br i1 false, label %[[FOR_END26_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]] +; CHECK-NEXT: br i1 false, label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[ENTRY_NEW:.*]] ; CHECK: [[ENTRY_NEW]]: ; CHECK-NEXT: br label %[[FOR_BODY:.*]] ; CHECK: [[FOR_BODY]]: @@ -1325,18 +1322,14 @@ define signext i16 @test10(i32 %k) #0 { ; CHECK-NEXT: br i1 [[TOBOOL9]], label %[[FOR_BODY2_SPLIT_1:.*]], label %[[FOR_BODY2_SPLIT2_1:.*]] ; CHECK: [[FOR_INC24]]: ; CHECK-NEXT: [[STOREMERGE_4_LCSSA_3:%.*]] = phi i64 [ [[STOREMERGE_4_3:%.*]], %[[FOR_INC21_3]] ] -; CHECK-NEXT: br i1 false, label %[[FOR_BODY]], label %[[FOR_END26_UNR_LCSSA_LOOPEXIT:.*]], !llvm.loop [[LOOP13:![0-9]+]] -; CHECK: [[FOR_END26_UNR_LCSSA_LOOPEXIT]]: +; CHECK-NEXT: br i1 false, label %[[FOR_BODY]], label %[[FOR_END26_UNR_LCSSA:.*]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK: [[FOR_END26_UNR_LCSSA]]: ; CHECK-NEXT: [[DEC_LCSSA_LCSSA_PH_PH:%.*]] = phi i64 [ 0, %[[FOR_INC24]] ] ; CHECK-NEXT: [[STOREMERGE_4_LCSSA_LCSSA_PH_PH:%.*]] = phi i64 [ [[STOREMERGE_4_LCSSA_3]], %[[FOR_INC24]] ] ; CHECK-NEXT: [[STOREMERGE_5_LCSSA_LCSSA_PH_PH:%.*]] = phi i32 [ 0, %[[FOR_INC24]] ] -; CHECK-NEXT: br label %[[FOR_END26_UNR_LCSSA]] -; CHECK: [[FOR_END26_UNR_LCSSA]]: -; CHECK-NEXT: [[DEC_LCSSA_LCSSA_PH:%.*]] = phi i64 [ poison, %[[ENTRY]] ], [ [[DEC_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[STOREMERGE_4_LCSSA_LCSSA_PH:%.*]] = phi i64 [ poison, %[[ENTRY]] ], [ [[STOREMERGE_4_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: [[STOREMERGE_5_LCSSA_LCSSA_PH:%.*]] = phi i32 [ poison, %[[ENTRY]] ], [ [[STOREMERGE_5_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA_LOOPEXIT]] ] -; CHECK-NEXT: br i1 true, label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[FOR_END26:.*]] +; CHECK-NEXT: br i1 true, label %[[FOR_BODY_EPIL_PREHEADER]], label %[[FOR_END26:.*]] ; CHECK: [[FOR_BODY_EPIL_PREHEADER]]: +; CHECK-NEXT: call void @llvm.assume(i1 true) ; CHECK-NEXT: br label %[[FOR_BODY_EPIL:.*]] ; CHECK: [[FOR_BODY_EPIL]]: ; CHECK-NEXT: br label %[[FOR_BODY2_EPIL:.*]] @@ -1360,9 +1353,9 @@ define signext i16 
@test10(i32 %k) #0 { ; CHECK-NEXT: [[STOREMERGE_4_LCSSA_EPIL:%.*]] = phi i64 [ [[STOREMERGE_4_EPIL]], %[[FOR_INC21_EPIL]] ] ; CHECK-NEXT: br label %[[FOR_END26]] ; CHECK: [[FOR_END26]]: -; CHECK-NEXT: [[DEC_LCSSA_LCSSA:%.*]] = phi i64 [ [[DEC_LCSSA_LCSSA_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ 0, %[[FOR_INC24_EPIL]] ] -; CHECK-NEXT: [[STOREMERGE_4_LCSSA_LCSSA:%.*]] = phi i64 [ [[STOREMERGE_4_LCSSA_LCSSA_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ [[STOREMERGE_4_LCSSA_EPIL]], %[[FOR_INC24_EPIL]] ] -; CHECK-NEXT: [[STOREMERGE_5_LCSSA_LCSSA:%.*]] = phi i32 [ [[STOREMERGE_5_LCSSA_LCSSA_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ 0, %[[FOR_INC24_EPIL]] ] +; CHECK-NEXT: [[DEC_LCSSA_LCSSA:%.*]] = phi i64 [ [[DEC_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ 0, %[[FOR_INC24_EPIL]] ] +; CHECK-NEXT: [[STOREMERGE_4_LCSSA_LCSSA:%.*]] = phi i64 [ [[STOREMERGE_4_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ [[STOREMERGE_4_LCSSA_EPIL]], %[[FOR_INC24_EPIL]] ] +; CHECK-NEXT: [[STOREMERGE_5_LCSSA_LCSSA:%.*]] = phi i32 [ [[STOREMERGE_5_LCSSA_LCSSA_PH_PH]], %[[FOR_END26_UNR_LCSSA]] ], [ 0, %[[FOR_INC24_EPIL]] ] ; CHECK-NEXT: store i64 [[DEC_LCSSA_LCSSA]], ptr @g, align 8 ; CHECK-NEXT: ret i16 0 ; CHECK: [[FOR_BODY2_SPLIT2_1]]: diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll index 3b0ad73..39217e5 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll @@ -23,7 +23,7 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC: [[ITER_CHECK]]: ; AUTO_VEC-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 ; AUTO_VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4 -; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] ; AUTO_VEC: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: ; AUTO_VEC-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP0]], 32 ; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] @@ -60,7 +60,7 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC-NEXT: [[TMP11:%.*]] = fmul fast float 5.000000e-01, [[DOTCAST12]] ; AUTO_VEC-NEXT: [[IND_END1:%.*]] = fadd fast float 1.000000e+00, [[TMP11]] ; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] +; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3:![0-9]+]] ; AUTO_VEC: [[VEC_EPILOG_PH]]: ; AUTO_VEC-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; AUTO_VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi float [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -84,14 +84,14 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 { ; AUTO_VEC-NEXT: br i1 [[TMP9]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; AUTO_VEC: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; AUTO_VEC-NEXT: [[CMP_N9:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC3]] -; AUTO_VEC-NEXT: br i1 [[CMP_N9]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]] -; AUTO_VEC: [[FOR_BODY]]: +; AUTO_VEC-NEXT: br i1 
[[CMP_N9]], label %[[FOR_END_LOOPEXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; AUTO_VEC: [[VEC_EPILOG_SCALAR_PH]]: ; AUTO_VEC-NEXT: [[BC_RESUME_VAL10:%.*]] = phi i64 [ [[N_VEC3]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; AUTO_VEC-NEXT: [[BC_RESUME_VAL11:%.*]] = phi float [ [[TMP10]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[ITER_CHECK]] ] ; AUTO_VEC-NEXT: br label %[[LOOP:.*]] ; AUTO_VEC: [[LOOP]]: -; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL10]], %[[FOR_BODY]] ] -; AUTO_VEC-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL11]], %[[FOR_BODY]] ] +; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL10]], %[[VEC_EPILOG_SCALAR_PH]] ] +; AUTO_VEC-NEXT: [[X_06:%.*]] = phi float [ [[CONV1:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL11]], %[[VEC_EPILOG_SCALAR_PH]] ] ; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] ; AUTO_VEC-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4 ; AUTO_VEC-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 @@ -144,19 +144,19 @@ define void @fp_iv_loop2(ptr noalias nocapture %A, i32 %N) { ; AUTO_VEC-SAME: ptr noalias captures(none) [[A:%.*]], i32 [[N:%.*]]) #[[ATTR0]] { ; AUTO_VEC-NEXT: [[ENTRY:.*:]] ; AUTO_VEC-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[N]], 0 -; AUTO_VEC-NEXT: br i1 [[CMP4]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_END:.*]] -; AUTO_VEC: [[FOR_BODY_PREHEADER]]: -; AUTO_VEC-NEXT: br label %[[FOR_BODY:.*]] -; AUTO_VEC: [[FOR_BODY]]: -; AUTO_VEC-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], %[[FOR_BODY]] ], [ 0, %[[FOR_BODY_PREHEADER]] ] -; AUTO_VEC-NEXT: [[X_06_EPIL:%.*]] = phi float [ [[CONV1_EPIL:%.*]], %[[FOR_BODY]] ], [ 1.000000e+00, %[[FOR_BODY_PREHEADER]] ] +; AUTO_VEC-NEXT: br i1 [[CMP4]], label %[[LOOP_PREHEADER:.*]], label %[[FOR_END:.*]] +; AUTO_VEC: [[LOOP_PREHEADER]]: +; AUTO_VEC-NEXT: br label %[[LOOP:.*]] +; AUTO_VEC: [[LOOP]]: +; AUTO_VEC-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_EPIL:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ] +; AUTO_VEC-NEXT: [[X_06_EPIL:%.*]] = phi float [ [[CONV1_EPIL:%.*]], %[[LOOP]] ], [ 1.000000e+00, %[[LOOP_PREHEADER]] ] ; AUTO_VEC-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV_EPIL]] ; AUTO_VEC-NEXT: store float [[X_06_EPIL]], ptr [[ARRAYIDX_EPIL]], align 4 ; AUTO_VEC-NEXT: [[CONV1_EPIL]] = fadd float [[X_06_EPIL]], 5.000000e-01 ; AUTO_VEC-NEXT: [[INDVARS_IV_NEXT_EPIL]] = add nuw nsw i64 [[INDVARS_IV_EPIL]], 1 ; AUTO_VEC-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT_EPIL]] to i32 ; AUTO_VEC-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] -; AUTO_VEC-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT:.*]], label %[[FOR_BODY]] +; AUTO_VEC-NEXT: br i1 [[EXITCOND]], label %[[FOR_END_LOOPEXIT:.*]], label %[[LOOP]] ; AUTO_VEC: [[FOR_END_LOOPEXIT]]: ; AUTO_VEC-NEXT: br label %[[FOR_END]] ; AUTO_VEC: [[FOR_END]]: @@ -193,7 +193,7 @@ define double @external_use_with_fast_math(ptr %a, i64 %n) { ; AUTO_VEC-NEXT: [[ENTRY:.*]]: ; AUTO_VEC-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) ; AUTO_VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 16 -; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY:.*]], label %[[VECTOR_PH:.*]] +; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label 
%[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; AUTO_VEC: [[VECTOR_PH]]: ; AUTO_VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 16 ; AUTO_VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] @@ -222,14 +222,14 @@ define double @external_use_with_fast_math(ptr %a, i64 %n) { ; AUTO_VEC: [[MIDDLE_BLOCK]]: ; AUTO_VEC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; AUTO_VEC-NEXT: [[TMP7:%.*]] = fsub fast double [[TMP6]], 3.000000e+00 -; AUTO_VEC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[FOR_BODY]] -; AUTO_VEC: [[FOR_BODY]]: +; AUTO_VEC-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]] +; AUTO_VEC: [[SCALAR_PH]]: ; AUTO_VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; AUTO_VEC-NEXT: [[BC_RESUME_VAL1:%.*]] = phi double [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ] ; AUTO_VEC-NEXT: br label %[[LOOP:.*]] ; AUTO_VEC: [[LOOP]]: -; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[FOR_BODY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] -; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ [[BC_RESUME_VAL1]], %[[FOR_BODY]] ], [ [[J_NEXT:%.*]], %[[LOOP]] ] +; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ] +; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[J_NEXT:%.*]], %[[LOOP]] ] ; AUTO_VEC-NEXT: [[T0:%.*]] = getelementptr double, ptr [[A]], i64 [[I]] ; AUTO_VEC-NEXT: store double [[J]], ptr [[T0]], align 8 ; AUTO_VEC-NEXT: [[I_NEXT]] = add i64 [[I]], 1 @@ -261,19 +261,19 @@ for.end: define double @external_use_without_fast_math(ptr %a, i64 %n) { ; AUTO_VEC-LABEL: define double @external_use_without_fast_math( ; AUTO_VEC-SAME: ptr [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { -; AUTO_VEC-NEXT: [[ENTRY_NEW:.*]]: -; AUTO_VEC-NEXT: br label %[[FOR_BODY:.*]] -; AUTO_VEC: [[FOR_BODY]]: -; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[I_NEXT_7:%.*]], %[[FOR_BODY]] ] -; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ 0.000000e+00, %[[ENTRY_NEW]] ], [ [[J_NEXT_7:%.*]], %[[FOR_BODY]] ] +; AUTO_VEC-NEXT: [[ENTRY:.*]]: +; AUTO_VEC-NEXT: br label %[[LOOP:.*]] +; AUTO_VEC: [[LOOP]]: +; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT_7:%.*]], %[[LOOP]] ] +; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[J_NEXT_7:%.*]], %[[LOOP]] ] ; AUTO_VEC-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A]], i64 [[I]] ; AUTO_VEC-NEXT: store double [[J]], ptr [[TMP7]], align 8 ; AUTO_VEC-NEXT: [[I_NEXT_7]] = add i64 [[I]], 1 ; AUTO_VEC-NEXT: [[J_NEXT_7]] = fadd double [[J]], 3.000000e+00 ; AUTO_VEC-NEXT: [[COND:%.*]] = icmp slt i64 [[I_NEXT_7]], [[N]] -; AUTO_VEC-NEXT: br i1 [[COND]], label %[[FOR_BODY]], label %[[FOR_END:.*]] +; AUTO_VEC-NEXT: br i1 [[COND]], label %[[LOOP]], label %[[FOR_END:.*]] ; AUTO_VEC: [[FOR_END]]: -; AUTO_VEC-NEXT: [[J_LCSSA:%.*]] = phi double [ [[J]], %[[FOR_BODY]] ] +; AUTO_VEC-NEXT: [[J_LCSSA:%.*]] = phi double [ [[J]], %[[LOOP]] ] ; AUTO_VEC-NEXT: ret double [[J_LCSSA]] ; entry: @@ -308,7 +308,7 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) { ; AUTO_VEC-NEXT: [[ITER_CHECK:.*]]: ; AUTO_VEC-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 ; AUTO_VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4 -; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[FOR_BODY:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] +; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]] ; AUTO_VEC: [[VECTOR_MAIN_LOOP_ITER_CHECK]]: 
; AUTO_VEC-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[TMP0]], 32 ; AUTO_VEC-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]] @@ -353,7 +353,7 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) { ; AUTO_VEC-NEXT: [[TMP12:%.*]] = fmul reassoc float 4.200000e+01, [[DOTCAST16]] ; AUTO_VEC-NEXT: [[IND_END1:%.*]] = fadd reassoc float 1.000000e+00, [[TMP12]] ; AUTO_VEC-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4 -; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] +; AUTO_VEC-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF3]] ; AUTO_VEC: [[VEC_EPILOG_PH]]: ; AUTO_VEC-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; AUTO_VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi float [ [[IND_END]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ] @@ -379,14 +379,14 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) { ; AUTO_VEC-NEXT: br i1 [[TMP15]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; AUTO_VEC: [[VEC_EPILOG_MIDDLE_BLOCK]]: ; AUTO_VEC-NEXT: [[CMP_N18:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC6]] -; AUTO_VEC-NEXT: br i1 [[CMP_N18]], label %[[EXIT]], label %[[FOR_BODY]] -; AUTO_VEC: [[FOR_BODY]]: +; AUTO_VEC-NEXT: br i1 [[CMP_N18]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]] +; AUTO_VEC: [[VEC_EPILOG_SCALAR_PH]]: ; AUTO_VEC-NEXT: [[BC_RESUME_VAL14:%.*]] = phi i64 [ [[N_VEC6]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ] ; AUTO_VEC-NEXT: [[BC_RESUME_VAL15:%.*]] = phi float [ [[TMP18]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 1.000000e+00, %[[ITER_CHECK]] ] ; AUTO_VEC-NEXT: br label %[[LOOP:.*]] ; AUTO_VEC: [[LOOP]]: -; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL14]], %[[FOR_BODY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[LOOP]] ] -; AUTO_VEC-NEXT: [[X_012:%.*]] = phi float [ [[BC_RESUME_VAL15]], %[[FOR_BODY]] ], [ [[ADD3:%.*]], %[[LOOP]] ] +; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL14]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[LOOP]] ] +; AUTO_VEC-NEXT: [[X_012:%.*]] = phi float [ [[BC_RESUME_VAL15]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[ADD3:%.*]], %[[LOOP]] ] ; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[P]], i64 [[INDVARS_IV]] ; AUTO_VEC-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; AUTO_VEC-NEXT: [[ADD:%.*]] = fadd reassoc float [[X_012]], [[TMP16]] diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll index 75420d4..bcea03a 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll @@ -1182,31 +1182,13 @@ define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(p ; CHECK: [[VECTOR_PH]]: ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], 
%[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 -; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD1]], zeroinitializer -; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 -; CHECK-NEXT: br i1 [[TMP13]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] -; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] -; CHECK: [[PRED_LOAD_CONTINUE]]: -; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 -; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] -; CHECK: [[PRED_LOAD_IF1]]: -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 -; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] -; CHECK: [[PRED_LOAD_CONTINUE2]]: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD1]], zeroinitializer +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i32>, ptr [[TMP1]], align 4 +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD1]], <2 x i32> [[WIDE_LOAD2]] ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll index f794620..cc3bda4 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll @@ -504,24 +504,35 @@ exit: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context(ptr noalias %p1, ptr noalias %p2) nosync { ; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context( ; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ] ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ] -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; 
CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT3:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] -; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[ARRAYIDX2]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] -; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[TMP1]], align 1 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] -; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END:.*]] -; CHECK: [[LOOP_INC]]: -; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[LOOP_END]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i8> [[WIDE_LOAD]], [[WIDE_LOAD2]] +; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 4 +; CHECK-NEXT: [[TMP3:%.*]] = freeze <4 x i1> [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 1024 +; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]] +; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: [[MIDDLE_SPLIT]]: +; CHECK-NEXT: br i1 [[TMP4]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[MIDDLE_BLOCK:.*]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[LOOP_END:.*]] +; CHECK: [[VECTOR_EARLY_EXIT]]: +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP2]], i1 true) +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], [[TMP7]] +; CHECK-NEXT: br label %[[LOOP_END]] ; CHECK: [[LOOP_END]]: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX1]], %[[LOOP]] ], [ -1, %[[LOOP_INC]] ] +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ -1, %[[MIDDLE_BLOCK]] ], [ [[TMP8]], %[[VECTOR_EARLY_EXIT]] ] ; CHECK-NEXT: ret i64 [[RETVAL]] ; entry: diff --git a/llvm/test/Transforms/LowerTypeTests/simple.ll b/llvm/test/Transforms/LowerTypeTests/simple.ll index 6fb8f6f..173a6ae 100644 --- a/llvm/test/Transforms/LowerTypeTests/simple.ll +++ b/llvm/test/Transforms/LowerTypeTests/simple.ll @@ -56,7 +56,7 @@ define i1 @foo(ptr %p) { ; CHECK: [[R8:%[^ ]*]] = getelementptr i8, ptr @bits_use.{{[0-9]*}}, i32 [[R5]] ; CHECK: [[R9:%[^ ]*]] = load i8, ptr [[R8]] - ; CHECK: [[R10:%[^ ]*]] = and i8 [[R9]], 1 + ; CHECK: [[R10:%[^ ]*]] = and i8 [[R9]], ptrtoint (ptr inttoptr (i8 1 to ptr) to i8) ; CHECK: [[R11:%[^ ]*]] = icmp ne i8 [[R10]], 0 ; CHECK: [[R16:%[^ ]*]] = phi i1 [ false, {{%[^ ]*}} ], [ [[R11]], {{%[^ ]*}} ] @@ -91,7 +91,7 @@ define i1 @baz(ptr %p) { ; CHECK: [[T8:%[^ ]*]] = getelementptr i8, ptr @bits_use{{(\.[0-9]*)?}}, i32 [[T5]] ; CHECK: [[T9:%[^ ]*]] = load i8, ptr [[T8]] - ; CHECK: [[T10:%[^ ]*]] = and i8 [[T9]], 2 + ; CHECK: [[T10:%[^ ]*]] = and i8 [[T9]], ptrtoint (ptr inttoptr (i8 2 to ptr) to i8) ; CHECK: [[T11:%[^ ]*]] = icmp ne i8 [[T10]], 0 ; CHECK: [[T16:%[^ ]*]] = phi i1 [ false, {{%[^ ]*}} ], [ [[T11]], {{%[^ ]*}} ] diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll index d8fc42b..57dacd4 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/extra-unroll-simplifications.ll @@ -14,7 
+14,7 @@ define void @partial_unroll_forced(i32 %N, ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 1 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[N]], 1 -; CHECK-NEXT: br i1 [[TMP0]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], label [[LOOP_LATCH_PREHEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[LOOP_LATCH_EPIL_PREHEADER:%.*]], label [[LOOP_LATCH_PREHEADER_NEW:%.*]] ; CHECK: loop.latch.preheader.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483646 ; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] @@ -35,12 +35,14 @@ define void @partial_unroll_forced(i32 %N, ptr %src, ptr noalias %dst) { ; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_LOOPEXIT_UNR_LCSSA]], label [[LOOP_LATCH]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], label [[LOOP_LATCH]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: exit.loopexit.unr-lcssa: -; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], [[LOOP_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[EXIT]], label [[LOOP_LATCH_EPIL:%.*]] -; CHECK: loop.latch.epil: +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[EXIT]], label [[LOOP_LATCH_EPIL_PREHEADER]] +; CHECK: loop.latch.epil.preheader: +; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD4:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD4]]) ; CHECK-NEXT: [[SRC_IDX_EPIL:%.*]] = getelementptr <8 x half>, ptr [[SRC]], i64 [[INDVARS_IV_UNR]] ; CHECK-NEXT: [[L_EPIL:%.*]] = load <8 x half>, ptr [[SRC_IDX_EPIL]], align 16 ; CHECK-NEXT: [[DST_IDX_EPIL:%.*]] = getelementptr <8 x half>, ptr [[DST]], i64 [[INDVARS_IV_UNR]] @@ -84,7 +86,7 @@ define void @cse_matching_load_from_previous_unrolled_iteration(i32 %N, ptr %src ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64 ; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 1 ; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[N]], 1 -; CHECK-NEXT: br i1 [[TMP0]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], label [[LOOP_LATCH_PREHEADER_NEW:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[LOOP_LATCH_EPIL_PREHEADER:%.*]], label [[LOOP_LATCH_PREHEADER_NEW:%.*]] ; CHECK: loop.latch.preheader.new: ; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483646 ; CHECK-NEXT: br label [[LOOP_LATCH:%.*]] @@ -107,12 +109,14 @@ define void @cse_matching_load_from_previous_unrolled_iteration(i32 %N, ptr %src ; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV]], 2 ; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2 ; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]] -; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_LOOPEXIT_UNR_LCSSA]], label [[LOOP_LATCH]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[EXIT_LOOPEXIT_UNR_LCSSA:%.*]], label [[LOOP_LATCH]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: exit.loopexit.unr-lcssa: -; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], 
[[LOOP_LATCH]] ] ; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0 -; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[EXIT]], label [[LOOP_LATCH_EPIL:%.*]] -; CHECK: loop.latch.epil: +; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[EXIT]], label [[LOOP_LATCH_EPIL_PREHEADER]] +; CHECK: loop.latch.epil.preheader: +; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[LOOP_LATCH_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1]], [[EXIT_LOOPEXIT_UNR_LCSSA]] ] +; CHECK-NEXT: [[LCMP_MOD4:%.*]] = icmp ne i64 [[XTRAITER]], 0 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[LCMP_MOD4]]) ; CHECK-NEXT: [[GEP_SRC_12_EPIL:%.*]] = getelementptr <2 x i32>, ptr [[SRC_12]], i64 [[INDVARS_IV_UNR]] ; CHECK-NEXT: [[L_12_EPIL:%.*]] = load <2 x i32>, ptr [[GEP_SRC_12_EPIL]], align 8 ; CHECK-NEXT: [[GEP_SRC_4_EPIL:%.*]] = getelementptr <2 x i32>, ptr [[SRC_4]], i64 [[INDVARS_IV_UNR]] diff --git a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll index 31d816c..bf4a366 100644 --- a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll +++ b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll @@ -8,10 +8,12 @@ define void @and_constexpr(i32 %a) { ; CHECK-LABEL: @and_constexpr( ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @use.i32(i32 0) -; CHECK-NEXT: [[AND_2:%.*]] = and i32 20, [[A:%.*]] +; CHECK-NEXT: [[AND_2:%.*]] = and i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]] ; CHECK-NEXT: call void @use.i32(i32 [[AND_2]]) -; CHECK-NEXT: call void @use.i1(i1 true) -; CHECK-NEXT: call void @use.i1(i1 false) +; CHECK-NEXT: [[TRUE_1:%.*]] = icmp ne i32 [[AND_2]], 100 +; CHECK-NEXT: call void @use.i1(i1 [[TRUE_1]]) +; CHECK-NEXT: [[FALSE_1:%.*]] = icmp eq i32 [[AND_2]], 100 +; CHECK-NEXT: call void @use.i1(i1 [[FALSE_1]]) ; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i32 [[AND_2]], 10 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]]) ; CHECK-NEXT: call void @use.i32(i32 4) @@ -38,7 +40,7 @@ define void @add_constexpr(i32 %a) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 0, [[A:%.*]] ; CHECK-NEXT: call void @use.i32(i32 [[ADD_1]]) -; CHECK-NEXT: [[ADD_2:%.*]] = add i32 20, [[A]] +; CHECK-NEXT: [[ADD_2:%.*]] = add i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A]] ; CHECK-NEXT: call void @use.i32(i32 [[ADD_2]]) ; CHECK-NEXT: [[COND_1:%.*]] = icmp ne i32 [[ADD_2]], 100 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]]) @@ -46,7 +48,7 @@ define void @add_constexpr(i32 %a) { ; CHECK-NEXT: call void @use.i1(i1 [[COND_2]]) ; CHECK-NEXT: [[COND_3:%.*]] = icmp eq i32 [[ADD_2]], 10 ; CHECK-NEXT: call void @use.i1(i1 [[COND_3]]) -; CHECK-NEXT: call void @use.i32(i32 120) +; CHECK-NEXT: call void @use.i32(i32 add (i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), i32 ptrtoint (ptr inttoptr (i32 100 to ptr) to i32))) ; CHECK-NEXT: ret void ; entry: @@ -69,7 +71,7 @@ define void @mul_constexpr(i32 %a) { ; CHECK-LABEL: @mul_constexpr( ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @use.i32(i32 0) -; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 20, [[A:%.*]] +; CHECK-NEXT: [[MUL_2:%.*]] = mul i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]] ; CHECK-NEXT: call void @use.i32(i32 [[MUL_2]]) ; CHECK-NEXT: [[COND_1:%.*]] = icmp ne i32 [[MUL_2]], 100 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]]) @@ -77,7 +79,8 @@ define void @mul_constexpr(i32 %a) { ; CHECK-NEXT: call void @use.i1(i1 [[COND_2]]) ; CHECK-NEXT: [[COND_3:%.*]] = icmp eq i32 [[MUL_2]], 10 ; CHECK-NEXT: call void @use.i1(i1 [[COND_3]]) -; CHECK-NEXT: call void @use.i32(i32 2000) +; CHECK-NEXT: [[MUL_3:%.*]] 
= mul i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), ptrtoint (ptr inttoptr (i32 100 to ptr) to i32) +; CHECK-NEXT: call void @use.i32(i32 [[MUL_3]]) ; CHECK-NEXT: ret void ; entry: @@ -100,13 +103,16 @@ define void @udiv_constexpr(i32 %a) { ; CHECK-LABEL: @udiv_constexpr( ; CHECK-NEXT: entry: ; CHECK-NEXT: call void @use.i32(i32 0) -; CHECK-NEXT: [[UDIV_2:%.*]] = udiv i32 20, [[A:%.*]] +; CHECK-NEXT: [[UDIV_2:%.*]] = udiv i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), [[A:%.*]] ; CHECK-NEXT: call void @use.i32(i32 [[UDIV_2]]) -; CHECK-NEXT: call void @use.i1(i1 true) -; CHECK-NEXT: call void @use.i1(i1 false) +; CHECK-NEXT: [[TRUE_1:%.*]] = icmp ne i32 [[UDIV_2]], 100 +; CHECK-NEXT: call void @use.i1(i1 [[TRUE_1]]) +; CHECK-NEXT: [[FALSE_1:%.*]] = icmp eq i32 [[UDIV_2]], 50 +; CHECK-NEXT: call void @use.i1(i1 [[FALSE_1]]) ; CHECK-NEXT: [[COND_1:%.*]] = icmp eq i32 [[UDIV_2]], 10 ; CHECK-NEXT: call void @use.i1(i1 [[COND_1]]) -; CHECK-NEXT: call void @use.i32(i32 0) +; CHECK-NEXT: [[UDIV_3:%.*]] = udiv i32 ptrtoint (ptr inttoptr (i32 20 to ptr) to i32), ptrtoint (ptr inttoptr (i32 100 to ptr) to i32) +; CHECK-NEXT: call void @use.i32(i32 [[UDIV_3]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/Transforms/SimplifyCFG/merge-calls-alloc-token.ll b/llvm/test/Transforms/SimplifyCFG/merge-calls-alloc-token.ll new file mode 100644 index 0000000..9bbe3eb --- /dev/null +++ b/llvm/test/Transforms/SimplifyCFG/merge-calls-alloc-token.ll @@ -0,0 +1,104 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt < %s -passes=simplifycfg -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128" + +declare ptr @_Znwm(i64) + +define ptr @test_merge_alloc_token_same(i1 %b) { +; CHECK-LABEL: define ptr @test_merge_alloc_token_same( +; CHECK-SAME: i1 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 4), !alloc_token [[META0:![0-9]+]] +; CHECK-NEXT: ret ptr [[CALL]] +; +entry: + br i1 %b, label %if.then, label %if.else + +if.then: + %call = call ptr @_Znwm(i64 4), !alloc_token !0 + br label %if.end + +if.else: + %call1 = call ptr @_Znwm(i64 4), !alloc_token !0 + br label %if.end + +if.end: + %x.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ] + ret ptr %x.0 +} + +define ptr @test_merge_alloc_token_different(i1 %b) { +; CHECK-LABEL: define ptr @test_merge_alloc_token_different( +; CHECK-SAME: i1 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 4) +; CHECK-NEXT: ret ptr [[CALL]] +; +entry: + br i1 %b, label %if.then, label %if.else + +if.then: + %call = call ptr @_Znwm(i64 4), !alloc_token !0 + br label %if.end + +if.else: + %call1 = call ptr @_Znwm(i64 4), !alloc_token !1 + br label %if.end + +if.end: + %x.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ] + ret ptr %x.0 +} + +define ptr @test_merge_alloc_token_some1(i1 %b) { +; CHECK-LABEL: define ptr @test_merge_alloc_token_some1( +; CHECK-SAME: i1 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 4) +; CHECK-NEXT: ret ptr [[CALL]] +; +entry: + br i1 %b, label %if.then, label %if.else + +if.then: + %call = call ptr @_Znwm(i64 4), !alloc_token !0 + br label %if.end + +if.else: + %call1 = call ptr @_Znwm(i64 4) + br label %if.end + +if.end: + %x.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ] + ret ptr %x.0 +} + +define ptr @test_merge_alloc_token_some2(i1 %b) { +; CHECK-LABEL: define ptr 
@test_merge_alloc_token_some2( +; CHECK-SAME: i1 [[B:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[CALL:%.*]] = call ptr @_Znwm(i64 4) +; CHECK-NEXT: ret ptr [[CALL]] +; +entry: + br i1 %b, label %if.then, label %if.else + +if.then: + %call = call ptr @_Znwm(i64 4) + br label %if.end + +if.else: + %call1 = call ptr @_Znwm(i64 4), !alloc_token !0 + br label %if.end + +if.end: + %x.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ] + ret ptr %x.0 +} + +!0 = !{!"int"} +!1 = !{!"char[4]"} +;. +; CHECK: [[META0]] = !{!"int"} +;. diff --git a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s index c8a5746..da83c54 100644 --- a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s +++ b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s @@ -2,8 +2,8 @@ REQUIRES: aarch64-registered-target // Flakey on SVE buildbots, disabled pending invesgitation. UNSUPPORTED: target={{.*}} -RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%d --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1 -RUN: llvm-objdump -d %d > %t.s +RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%t.obj --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1 +RUN: llvm-objdump -d %t.obj > %t.s RUN: FileCheck %s < %t.s CHECK-NOT: ld{{[1-4]}} diff --git a/llvm/test/tools/llvm-offload-binary/llvm-offload-binary.ll b/llvm/test/tools/llvm-offload-binary/llvm-offload-binary.ll new file mode 100644 index 0000000..b196c24 --- /dev/null +++ b/llvm/test/tools/llvm-offload-binary/llvm-offload-binary.ll @@ -0,0 +1,10 @@ +; RUN: llvm-offload-binary -o %t --image=file=%s,arch=abc,triple=x-y-z +; RUN: llvm-objdump --offloading %t | FileCheck %s +; RUN: llvm-offload-binary %t --image=file=%t2,arch=abc,triple=x-y-z +; RUN: diff %s %t2 + +; CHECK: OFFLOADING IMAGE [0]: +; CHECK-NEXT: kind <none> +; CHECK-NEXT: arch abc +; CHECK-NEXT: triple x-y-z +; CHECK-NEXT: producer none diff --git a/llvm/tools/llvm-offload-binary/CMakeLists.txt b/llvm/tools/llvm-offload-binary/CMakeLists.txt new file mode 100644 index 0000000..6f46f1b --- /dev/null +++ b/llvm/tools/llvm-offload-binary/CMakeLists.txt @@ -0,0 +1,13 @@ +set(LLVM_LINK_COMPONENTS + BinaryFormat + Object + Support) + +add_llvm_tool(llvm-offload-binary + llvm-offload-binary.cpp + + DEPENDS + intrinsics_gen + ) +# Legacy binary name to be removed at a later release. +add_llvm_tool_symlink(clang-offload-packager llvm-offload-binary) diff --git a/llvm/tools/llvm-offload-binary/llvm-offload-binary.cpp b/llvm/tools/llvm-offload-binary/llvm-offload-binary.cpp new file mode 100644 index 0000000..b1bc335 --- /dev/null +++ b/llvm/tools/llvm-offload-binary/llvm-offload-binary.cpp @@ -0,0 +1,259 @@ +//===-- llvm-offload-binary.cpp - offload binary management utility -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This tool takes several device object files and bundles them into a single +// binary image using a custom binary format. This is intended to be used to +// embed many device files into an application to create a fat binary. It also +// supports extracting these files from a known location. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/StringExtras.h" +#include "llvm/BinaryFormat/Magic.h" +#include "llvm/Object/ArchiveWriter.h" +#include "llvm/Object/OffloadBinary.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/FileOutputBuffer.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/Signals.h" +#include "llvm/Support/StringSaver.h" +#include "llvm/Support/WithColor.h" + +using namespace llvm; +using namespace llvm::object; + +static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden); + +static cl::OptionCategory OffloadBinaryCategory("llvm-offload-binary options"); + +static cl::opt<std::string> OutputFile("o", cl::desc("Write output to <file>."), + cl::value_desc("file"), + cl::cat(OffloadBinaryCategory)); + +static cl::opt<std::string> InputFile(cl::Positional, + cl::desc("Extract from <file>."), + cl::value_desc("file"), + cl::cat(OffloadBinaryCategory)); + +static cl::list<std::string> + DeviceImages("image", + cl::desc("List of key and value arguments. Required keywords " + "are 'file' and 'triple'."), + cl::value_desc("<key>=<value>,..."), + cl::cat(OffloadBinaryCategory)); + +static cl::opt<bool> + CreateArchive("archive", + cl::desc("Write extracted files to a static archive"), + cl::cat(OffloadBinaryCategory)); + +/// Path of the current binary. +static const char *PackagerExecutable; + +// Get a map containing all the arguments for the image. Repeated arguments will +// be placed in a comma separated list. +static DenseMap<StringRef, StringRef> getImageArguments(StringRef Image, + StringSaver &Saver) { + DenseMap<StringRef, StringRef> Args; + for (StringRef Arg : llvm::split(Image, ",")) { + auto [Key, Value] = Arg.split("="); + auto [It, Inserted] = Args.try_emplace(Key, Value); + if (!Inserted) + It->second = Saver.save(It->second + "," + Value); + } + + return Args; +} + +static Error writeFile(StringRef Filename, StringRef Data) { + Expected<std::unique_ptr<FileOutputBuffer>> OutputOrErr = + FileOutputBuffer::create(Filename, Data.size()); + if (!OutputOrErr) + return OutputOrErr.takeError(); + std::unique_ptr<FileOutputBuffer> Output = std::move(*OutputOrErr); + llvm::copy(Data, Output->getBufferStart()); + if (Error E = Output->commit()) + return E; + return Error::success(); +} + +static Error bundleImages() { + SmallVector<char, 1024> BinaryData; + raw_svector_ostream OS(BinaryData); + for (StringRef Image : DeviceImages) { + BumpPtrAllocator Alloc; + StringSaver Saver(Alloc); + DenseMap<StringRef, StringRef> Args = getImageArguments(Image, Saver); + + if (!Args.count("triple") || !Args.count("file")) + return createStringError( + inconvertibleErrorCode(), + "'file' and 'triple' are required image arguments"); + + // Permit using multiple instances of `file` in a single string. + for (auto &File : llvm::split(Args["file"], ",")) { + OffloadBinary::OffloadingImage ImageBinary{}; + + llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr = + llvm::MemoryBuffer::getFileOrSTDIN(File); + if (std::error_code EC = ObjectOrErr.getError()) + return errorCodeToError(EC); + + // Clang uses the '.o' suffix for LTO bitcode. 
+ if (identify_magic((*ObjectOrErr)->getBuffer()) == file_magic::bitcode) + ImageBinary.TheImageKind = object::IMG_Bitcode; + else if (sys::path::has_extension(File)) + ImageBinary.TheImageKind = + getImageKind(sys::path::extension(File).drop_front()); + else + ImageBinary.TheImageKind = IMG_None; + ImageBinary.Image = std::move(*ObjectOrErr); + for (const auto &[Key, Value] : Args) { + if (Key == "kind") { + ImageBinary.TheOffloadKind = getOffloadKind(Value); + } else if (Key != "file") { + ImageBinary.StringData[Key] = Value; + } + } + llvm::SmallString<0> Buffer = OffloadBinary::write(ImageBinary); + if (Buffer.size() % OffloadBinary::getAlignment() != 0) + return createStringError(inconvertibleErrorCode(), + "Offload binary has invalid size alignment"); + OS << Buffer; + } + } + + if (Error E = writeFile(OutputFile, + StringRef(BinaryData.begin(), BinaryData.size()))) + return E; + return Error::success(); +} + +static Error unbundleImages() { + ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr = + MemoryBuffer::getFileOrSTDIN(InputFile); + if (std::error_code EC = BufferOrErr.getError()) + return createFileError(InputFile, EC); + std::unique_ptr<MemoryBuffer> Buffer = std::move(*BufferOrErr); + + // This data can be misaligned if extracted from an archive. + if (!isAddrAligned(Align(OffloadBinary::getAlignment()), + Buffer->getBufferStart())) + Buffer = MemoryBuffer::getMemBufferCopy(Buffer->getBuffer(), + Buffer->getBufferIdentifier()); + + SmallVector<OffloadFile> Binaries; + if (Error Err = extractOffloadBinaries(*Buffer, Binaries)) + return Err; + + // Try to extract each device image specified by the user from the input file. + for (StringRef Image : DeviceImages) { + BumpPtrAllocator Alloc; + StringSaver Saver(Alloc); + auto Args = getImageArguments(Image, Saver); + + SmallVector<const OffloadBinary *> Extracted; + for (const OffloadFile &File : Binaries) { + const auto *Binary = File.getBinary(); + // We handle the 'file' and 'kind' identifiers differently. + bool Match = llvm::all_of(Args, [&](auto &Arg) { + const auto [Key, Value] = Arg; + if (Key == "file") + return true; + if (Key == "kind") + return Binary->getOffloadKind() == getOffloadKind(Value); + return Binary->getString(Key) == Value; + }); + if (Match) + Extracted.push_back(Binary); + } + + if (Extracted.empty()) + continue; + + if (CreateArchive) { + if (!Args.count("file")) + return createStringError(inconvertibleErrorCode(), + "Image must have a 'file' argument."); + + SmallVector<NewArchiveMember> Members; + for (const OffloadBinary *Binary : Extracted) + Members.emplace_back(MemoryBufferRef( + Binary->getImage(), + Binary->getMemoryBufferRef().getBufferIdentifier())); + + if (Error E = writeArchive( + Args["file"], Members, SymtabWritingMode::NormalSymtab, + Archive::getDefaultKind(), true, false, nullptr)) + return E; + } else if (auto It = Args.find("file"); It != Args.end()) { + if (Extracted.size() > 1) + WithColor::warning(errs(), PackagerExecutable) + << "Multiple inputs match to a single file, '" << It->second + << "'\n"; + if (Error E = writeFile(It->second, Extracted.back()->getImage())) + return E; + } else { + uint64_t Idx = 0; + for (const OffloadBinary *Binary : Extracted) { + StringRef Filename = + Saver.save(sys::path::stem(InputFile) + "-" + Binary->getTriple() + + "-" + Binary->getArch() + "." + std::to_string(Idx++) + + "." 
+ getImageKindName(Binary->getImageKind())); + if (Error E = writeFile(Filename, Binary->getImage())) + return E; + } + } + } + + return Error::success(); +} + +int main(int argc, const char **argv) { + sys::PrintStackTraceOnErrorSignal(argv[0]); + cl::HideUnrelatedOptions(OffloadBinaryCategory); + cl::ParseCommandLineOptions( + argc, argv, + "A utility for bundling several object files into a single binary.\n" + "The output binary can then be embedded into the host section table\n" + "to create a fatbinary containing offloading code.\n"); + + if (sys::path::stem(argv[0]).ends_with("clang-offload-packager")) + WithColor::warning(errs(), PackagerExecutable) + << "'clang-offload-packager' is deprecated. Use 'llvm-offload-binary' " + "instead.\n"; + + if (Help || (OutputFile.empty() && InputFile.empty())) { + cl::PrintHelpMessage(); + return EXIT_SUCCESS; + } + + PackagerExecutable = argv[0]; + auto reportError = [argv](Error E) { + logAllUnhandledErrors(std::move(E), WithColor::error(errs(), argv[0])); + return EXIT_FAILURE; + }; + + if (!InputFile.empty() && !OutputFile.empty()) + return reportError( + createStringError(inconvertibleErrorCode(), + "Packaging to an output file and extracting from an " + "input file are mutually exclusive.")); + + if (!OutputFile.empty()) { + if (Error Err = bundleImages()) + return reportError(std::move(Err)); + } else if (!InputFile.empty()) { + if (Error Err = unbundleImages()) + return reportError(std::move(Err)); + } + + return EXIT_SUCCESS; +} diff --git a/llvm/unittests/IR/FunctionTest.cpp b/llvm/unittests/IR/FunctionTest.cpp index 7ba7584..8ed7699 100644 --- a/llvm/unittests/IR/FunctionTest.cpp +++ b/llvm/unittests/IR/FunctionTest.cpp @@ -625,4 +625,23 @@ TEST(FunctionTest, Personality) { EXPECT_FALSE(LLVMHasPersonalityFn(wrap(F))); } +TEST(FunctionTest, LLVMGetOrInsertFunction) { + LLVMContext Ctx; + Module M("test", Ctx); + Type *Int8Ty = Type::getInt8Ty(Ctx); + FunctionType *FTy = FunctionType::get(Int8Ty, false); + + // Create the function using the C API + LLVMValueRef FuncRef = LLVMGetOrInsertFunction(wrap(&M), "F", 1, wrap(FTy)); + + // Verify that the returned value is a function and has the correct type + Function *Func = unwrap<Function>(FuncRef); + EXPECT_EQ(Func->getName(), "F"); + EXPECT_EQ(Func->getFunctionType(), FTy); + + // Call LLVMGetOrInsertFunction again to ensure it returns the same function + LLVMValueRef FuncRef2 = LLVMGetOrInsertFunction(wrap(&M), "F", 1, wrap(FTy)); + EXPECT_EQ(FuncRef, FuncRef2); +} + } // end namespace diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp index 21d4596..fe9e7e8 100644 --- a/llvm/unittests/IR/InstructionsTest.cpp +++ b/llvm/unittests/IR/InstructionsTest.cpp @@ -606,82 +606,63 @@ TEST(InstructionTest, ConstrainedTrans) { TEST(InstructionsTest, isEliminableCastPair) { LLVMContext C; + DataLayout DL1("p1:32:32"); - Type* Int16Ty = Type::getInt16Ty(C); - Type* Int32Ty = Type::getInt32Ty(C); - Type* Int64Ty = Type::getInt64Ty(C); - Type *Int64PtrTy = PointerType::get(C, 0); + Type *Int16Ty = Type::getInt16Ty(C); + Type *Int64Ty = Type::getInt64Ty(C); + Type *PtrTy64 = PointerType::get(C, 0); + Type *PtrTy32 = PointerType::get(C, 1); // Source and destination pointers have same size -> bitcast. 
EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt, - CastInst::IntToPtr, - Int64PtrTy, Int64Ty, Int64PtrTy, - Int32Ty, nullptr, Int32Ty), - CastInst::BitCast); - - // Source and destination have unknown sizes, but the same address space and - // the intermediate int is the maximum pointer size -> bitcast - EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt, - CastInst::IntToPtr, - Int64PtrTy, Int64Ty, Int64PtrTy, - nullptr, nullptr, nullptr), + CastInst::IntToPtr, PtrTy32, Int64Ty, + PtrTy32, &DL1), CastInst::BitCast); - // Source and destination have unknown sizes, but the same address space and - // the intermediate int is not the maximum pointer size -> nothing + // Source and destination have unknown sizes. EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt, - CastInst::IntToPtr, - Int64PtrTy, Int32Ty, Int64PtrTy, - nullptr, nullptr, nullptr), + CastInst::IntToPtr, PtrTy32, Int64Ty, + PtrTy32, nullptr), 0U); // Middle pointer big enough -> bitcast. EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, - CastInst::PtrToInt, - Int64Ty, Int64PtrTy, Int64Ty, - nullptr, Int64Ty, nullptr), + CastInst::PtrToInt, Int64Ty, PtrTy64, + Int64Ty, &DL1), CastInst::BitCast); // Middle pointer too small -> fail. EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, - CastInst::PtrToInt, - Int64Ty, Int64PtrTy, Int64Ty, - nullptr, Int32Ty, nullptr), + CastInst::PtrToInt, Int64Ty, PtrTy32, + Int64Ty, &DL1), 0U); // Test that we don't eliminate bitcasts between different address spaces, // or if we don't have available pointer size information. - DataLayout DL("e-p:32:32:32-p1:16:16:16-p2:64:64:64-i1:8:8-i8:8:8-i16:16:16" - "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64" - "-v128:128:128-a:0:64-s:64:64-f80:128:128-n8:16:32:64-S128"); + DataLayout DL2("e-p:32:32:32-p1:16:16:16-p2:64:64:64-i1:8:8-i8:8:8-i16:16:16" + "-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64" + "-v128:128:128-a:0:64-s:64:64-f80:128:128-n8:16:32:64-S128"); Type *Int64PtrTyAS1 = PointerType::get(C, 1); Type *Int64PtrTyAS2 = PointerType::get(C, 2); - IntegerType *Int16SizePtr = DL.getIntPtrType(C, 1); - IntegerType *Int64SizePtr = DL.getIntPtrType(C, 2); - // Cannot simplify inttoptr, addrspacecast EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, - CastInst::AddrSpaceCast, - Int16Ty, Int64PtrTyAS1, Int64PtrTyAS2, - nullptr, Int16SizePtr, Int64SizePtr), + CastInst::AddrSpaceCast, Int16Ty, + Int64PtrTyAS1, Int64PtrTyAS2, &DL2), 0U); // Cannot simplify addrspacecast, ptrtoint EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::AddrSpaceCast, - CastInst::PtrToInt, - Int64PtrTyAS1, Int64PtrTyAS2, Int16Ty, - Int64SizePtr, Int16SizePtr, nullptr), + CastInst::PtrToInt, Int64PtrTyAS1, + Int64PtrTyAS2, Int16Ty, &DL2), 0U); // Pass since the bitcast address spaces are the same - EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, - CastInst::BitCast, - Int16Ty, Int64PtrTyAS1, Int64PtrTyAS1, - nullptr, nullptr, nullptr), + EXPECT_EQ(CastInst::isEliminableCastPair( + CastInst::IntToPtr, CastInst::BitCast, Int16Ty, Int64PtrTyAS1, + Int64PtrTyAS1, nullptr), CastInst::IntToPtr); - } TEST(InstructionsTest, CloneCall) { diff --git a/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp b/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp index c96331c..10f0213 100644 --- a/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp +++ b/llvm/utils/TableGen/Basic/RuntimeLibcallsEmitter.cpp @@ -543,8 +543,11 @@ void RuntimeLibcallEmitter::emitSystemRuntimeLibrarySetCalls( OS << "void 
llvm::RTLIB::RuntimeLibcallsInfo::setTargetRuntimeLibcallSets(" "const llvm::Triple &TT, ExceptionHandling ExceptionModel, " "FloatABI::ABIType FloatABI, EABI EABIVersion, " - "StringRef ABIName) {\n"; - + "StringRef ABIName) {\n" + " struct LibcallImplPair {\n" + " RTLIB::Libcall Func;\n" + " RTLIB::LibcallImpl Impl;\n" + " };\n"; ArrayRef<const Record *> AllLibs = Records.getAllDerivedDefinitions("SystemRuntimeLibrary"); @@ -669,20 +672,36 @@ void RuntimeLibcallEmitter::emitSystemRuntimeLibrarySetCalls( Funcs.erase(UniqueI, Funcs.end()); - StringRef CCEnum; + OS << indent(IndentDepth + 2) + << "static const LibcallImplPair LibraryCalls"; + SubsetPredicate.emitTableVariableNameSuffix(OS); if (FuncsWithCC.CallingConv) - CCEnum = FuncsWithCC.CallingConv->getValueAsString("CallingConv"); + OS << '_' << FuncsWithCC.CallingConv->getName(); + OS << "[] = {\n"; for (const RuntimeLibcallImpl *LibCallImpl : Funcs) { - OS << indent(IndentDepth + 2); - LibCallImpl->emitSetImplCall(OS); + OS << indent(IndentDepth + 6); + LibCallImpl->emitTableEntry(OS); + } - if (FuncsWithCC.CallingConv) { - OS << indent(IndentDepth + 2) << "setLibcallImplCallingConv("; - LibCallImpl->emitEnumEntry(OS); - OS << ", " << CCEnum << ");\n"; - } + OS << indent(IndentDepth + 2) << "};\n\n" + << indent(IndentDepth + 2) + << "for (const auto [Func, Impl] : LibraryCalls"; + SubsetPredicate.emitTableVariableNameSuffix(OS); + if (FuncsWithCC.CallingConv) + OS << '_' << FuncsWithCC.CallingConv->getName(); + + OS << ") {\n" + << indent(IndentDepth + 4) << "setLibcallImpl(Func, Impl);\n"; + + if (FuncsWithCC.CallingConv) { + StringRef CCEnum = + FuncsWithCC.CallingConv->getValueAsString("CallingConv"); + OS << indent(IndentDepth + 4) << "setLibcallImplCallingConv(Impl, " + << CCEnum << ");\n"; } + + OS << indent(IndentDepth + 2) << "}\n"; OS << '\n'; if (!SubsetPredicate.isAlwaysAvailable()) { diff --git a/llvm/utils/emacs/llvm-mode.el b/llvm/utils/emacs/llvm-mode.el index 660d071..240c133 100644 --- a/llvm/utils/emacs/llvm-mode.el +++ b/llvm/utils/emacs/llvm-mode.el @@ -34,7 +34,7 @@ "inaccessiblemem_or_argmemonly" "inalloca" "inlinehint" "jumptable" "minsize" "mustprogress" "naked" "nobuiltin" "nonnull" "nocapture" "nocallback" "nocf_check" "noduplicate" "noext" "nofree" "noimplicitfloat" "noinline" "nomerge" "nonlazybind" "noprofile" "noredzone" "noreturn" "norecurse" "nosync" "noundef" "nounwind" "nosanitize_bounds" "nosanitize_coverage" "null_pointer_is_valid" "optdebug" "optforfuzzing" "optnone" "optsize" "preallocated" "readnone" "readonly" "returned" "returns_twice" - "shadowcallstack" "signext" "speculatable" "speculative_load_hardening" "ssp" "sspreq" "sspstrong" "safestack" "sanitize_address" "sanitize_hwaddress" "sanitize_memtag" + "shadowcallstack" "signext" "speculatable" "speculative_load_hardening" "ssp" "sspreq" "sspstrong" "safestack" "sanitize_address" "sanitize_alloc_token" "sanitize_hwaddress" "sanitize_memtag" "sanitize_thread" "sanitize_memory" "strictfp" "swifterror" "uwtable" "vscale_range" "willreturn" "writeonly" "zeroext") 'symbols) . font-lock-constant-face) ;; Variables '("%[-a-zA-Z$._][-a-zA-Z$._0-9]*" . 
font-lock-variable-name-face) diff --git a/llvm/utils/gn/secondary/bolt/lib/Passes/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Passes/BUILD.gn index a7975bd..393309e 100644 --- a/llvm/utils/gn/secondary/bolt/lib/Passes/BUILD.gn +++ b/llvm/utils/gn/secondary/bolt/lib/Passes/BUILD.gn @@ -30,12 +30,14 @@ static_library("Passes") { "IdenticalCodeFolding.cpp", "IndirectCallPromotion.cpp", "Inliner.cpp", + "InsertNegateRAStatePass.cpp", "Instrumentation.cpp", "JTFootprintReduction.cpp", "LivenessAnalysis.cpp", "LongJmp.cpp", "LoopInversionPass.cpp", "MCF.cpp", + "MarkRAStates.cpp", "PAuthGadgetScanner.cpp", "PLTCall.cpp", "PatchEntries.cpp", diff --git a/llvm/utils/gn/secondary/clang/tools/clang-offload-packager/BUILD.gn b/llvm/utils/gn/secondary/clang/tools/clang-offload-packager/BUILD.gn deleted file mode 100644 index b33b534..0000000 --- a/llvm/utils/gn/secondary/clang/tools/clang-offload-packager/BUILD.gn +++ /dev/null @@ -1,10 +0,0 @@ -executable("clang-offload-packager") { - configs += [ "//llvm/utils/gn/build:clang_code" ] - deps = [ - "//clang/lib/Basic", - "//llvm/lib/Object", - "//llvm/lib/Support", - "//llvm/lib/Target:TargetsToBuild", - ] - sources = [ "ClangOffloadPackager.cpp" ] -} diff --git a/llvm/utils/gn/secondary/clang/tools/driver/BUILD.gn b/llvm/utils/gn/secondary/clang/tools/driver/BUILD.gn index 6f00fca..54fca3b 100644 --- a/llvm/utils/gn/secondary/clang/tools/driver/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/tools/driver/BUILD.gn @@ -60,7 +60,6 @@ driver_executable("clang") { "//clang/tools/clang-linker-wrapper", "//clang/tools/clang-nvlink-wrapper", "//clang/tools/clang-offload-bundler", - "//clang/tools/clang-offload-packager", "//llvm/include/llvm/Config:llvm-config", "//llvm/lib/Analysis", "//llvm/lib/CodeGen", diff --git a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn index a68cee4..9b69a44 100644 --- a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/BUILD.gn @@ -34,6 +34,7 @@ static_library("Orc") { "EPCIndirectionUtils.cpp", "ExecutionUtils.cpp", "ExecutorProcessControl.cpp", + "ExecutorResolutionGenerator.cpp", "GetDylibInterface.cpp", "IRCompileLayer.cpp", "IRPartitionLayer.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn index 0104684..c4ce990 100644 --- a/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/ExecutionEngine/Orc/TargetProcess/BUILD.gn @@ -7,6 +7,7 @@ static_library("TargetProcess") { ] sources = [ "DefaultHostBootstrapValues.cpp", + "ExecutorResolver.cpp", "ExecutorSharedMemoryMapperService.cpp", "JITLoaderGDB.cpp", "JITLoaderPerf.cpp", diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn index a8eb834..2c6204e 100644 --- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Instrumentation/BUILD.gn @@ -11,6 +11,7 @@ static_library("Instrumentation") { ] sources = [ "AddressSanitizer.cpp", + "AllocToken.cpp", "BlockCoverageInference.cpp", "BoundsChecking.cpp", "CGProfile.cpp", diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn index 959fb44..2f6399f 100644 --- 
a/llvm/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/tools/llvm-profdata/BUILD.gn @@ -1,6 +1,6 @@ import("//llvm/utils/gn/build/driver_executable.gni") -driver_executable("llvm-profdata") { +executable("llvm-profdata") { deps = [ "//llvm/lib/Debuginfod", "//llvm/lib/IR", diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-reduce/BUILD.gn b/llvm/utils/gn/secondary/llvm/tools/llvm-reduce/BUILD.gn index 6aa49d0..0c7affb 100644 --- a/llvm/utils/gn/secondary/llvm/tools/llvm-reduce/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/tools/llvm-reduce/BUILD.gn @@ -31,6 +31,7 @@ executable("llvm-reduce") { "deltas/ReduceGlobalVarInitializers.cpp", "deltas/ReduceGlobalVars.cpp", "deltas/ReduceIRReferences.cpp", + "deltas/ReduceInlineCallSites.cpp", "deltas/ReduceInstructionFlags.cpp", "deltas/ReduceInstructionFlagsMIR.cpp", "deltas/ReduceInstructions.cpp", diff --git a/llvm/utils/llvm.grm b/llvm/utils/llvm.grm index 4113231..dddfe3c 100644 --- a/llvm/utils/llvm.grm +++ b/llvm/utils/llvm.grm @@ -173,6 +173,7 @@ FuncAttr ::= noreturn | returns_twice | nonlazybind | sanitize_address + | sanitize_alloc_token | sanitize_thread | sanitize_memory | mustprogress diff --git a/llvm/utils/vim/syntax/llvm.vim b/llvm/utils/vim/syntax/llvm.vim index e3b8ff8..e048caa 100644 --- a/llvm/utils/vim/syntax/llvm.vim +++ b/llvm/utils/vim/syntax/llvm.vim @@ -163,6 +163,7 @@ syn keyword llvmKeyword \ returns_twice \ safestack \ sanitize_address + \ sanitize_alloc_token \ sanitize_hwaddress \ sanitize_memory \ sanitize_memtag diff --git a/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml b/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml index b644823..1faaf6b 100644 --- a/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml +++ b/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml @@ -258,6 +258,7 @@ patterns: \\breturns_twice\\b|\ \\bsafestack\\b|\ \\bsanitize_address\\b|\ + \\bsanitize_alloc_token\\b|\ \\bsanitize_hwaddress\\b|\ \\bsanitize_memory\\b|\ \\bsanitize_memtag\\b|\ |
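
For readers following the llvm-offload-binary.cpp hunk above, the bundling and extraction paths boil down to a small round trip over the OffloadBinary API: build an OffloadingImage with key-value string metadata, serialize it with OffloadBinary::write, and parse the container back with extractOffloadBinaries. The sketch below is illustrative only; it assumes the in-tree llvm/Object and llvm/Support headers used by the tool, and the roundTrip helper, the nvptx64/sm_70 metadata values, and the "roundtrip" buffer name are invented for the example rather than taken from the commit.

// Minimal sketch, assuming the in-tree llvm/Object API shown in the diff:
// package one device object with metadata, then read every entry back.
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Object/OffloadBinary.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::object;

static Error roundTrip(StringRef DeviceObjectPath) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
      MemoryBuffer::getFileOrSTDIN(DeviceObjectPath);
  if (std::error_code EC = BufOrErr.getError())
    return errorCodeToError(EC);

  // Describe one image; StringData carries the arbitrary key-value metadata
  // ('triple', 'arch', ...) that later selects the image on extraction.
  OffloadBinary::OffloadingImage Image{};
  Image.TheImageKind = IMG_Object;
  Image.TheOffloadKind = OFK_OpenMP;
  Image.StringData["triple"] = "nvptx64-nvidia-cuda";
  Image.StringData["arch"] = "sm_70";
  Image.Image = std::move(*BufOrErr);

  // Serialize into the offload container format.
  SmallString<0> Binary = OffloadBinary::write(Image);

  // Copy into a fresh MemoryBuffer so the data satisfies the container's
  // alignment requirement, then walk every offload entry it holds.
  std::unique_ptr<MemoryBuffer> Copy =
      MemoryBuffer::getMemBufferCopy(Binary, "roundtrip");
  SmallVector<OffloadFile> Extracted;
  if (Error E = extractOffloadBinaries(*Copy, Extracted))
    return E;
  for (const OffloadFile &File : Extracted)
    outs() << File.getBinary()->getTriple() << " "
           << File.getBinary()->getArch() << " ("
           << File.getBinary()->getImage().size() << " bytes)\n";
  return Error::success();
}

A real consumer would write the serialized container to disk, as the tool does through its writeFile helper; error handling is trimmed here to keep the sketch short.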