Diffstat (limited to 'llvm')
232 files changed, 17506 insertions, 5316 deletions
diff --git a/llvm/docs/TableGen/ProgRef.rst b/llvm/docs/TableGen/ProgRef.rst index 2b1af05..0ff4cc7 100644 --- a/llvm/docs/TableGen/ProgRef.rst +++ b/llvm/docs/TableGen/ProgRef.rst @@ -64,7 +64,7 @@ Classes and concrete records have a unique *name*, either chosen by the programmer or generated by TableGen. Associated with that name is a list of *fields* with values and an optional list of *parent classes* (sometimes called base or super classes). The fields are the primary data that -backends will process. Note that TableGen assigns no meanings to fields; the +backends will process. Note that TableGen assigns no meaning to fields; the meanings are entirely up to the backends and the programs that incorporate the output of those backends. @@ -243,7 +243,7 @@ Include files ------------- TableGen has an include mechanism. The content of the included file -lexically replaces the ``include`` directive and is then parsed as if it was +lexically replaces the ``include`` directive and is then parsed as if it were originally in the main file. .. productionlist:: @@ -670,17 +670,17 @@ name of a multiclass. The argument values can be specified in two forms: * Positional argument (``value``). The value is assigned to the argument in the - corresponding position. For ``Foo<a0, a1>``, ``a0`` will be assigned to first - argument and ``a1`` will be assigned to second argument. + corresponding position. For ``Foo<a0, a1>``, ``a0`` will be assigned to the first + argument and ``a1`` will be assigned to the second argument. * Named argument (``name=value``). The value is assigned to the argument with the specified name. For ``Foo<a=a0, b=a1>``, ``a0`` will be assigned to the argument with name ``a`` and ``a1`` will be assigned to the argument with name ``b``. -Required arguments can also be specified as named argument. +Required arguments can also be specified as a named argument. Note that the argument can only be specified once regardless of the way (named -or positional) to specify and positional arguments should be put before named +or positional) to specify and positional arguments should precede named arguments. .. productionlist:: @@ -817,7 +817,7 @@ type. It provides a single field, ``Value``, which holds a 3-bit number. Its template argument, ``val``, is used to set the ``Value`` field. Each of the eight records is defined with ``FPFormat`` as its parent class. The enumeration value is passed in angle brackets as the template argument. Each -record will inherent the ``Value`` field with the appropriate enumeration +record will inherit the ``Value`` field with the appropriate enumeration value. Here is a more complex example of classes with template arguments. First, we @@ -1308,7 +1308,7 @@ with ``F0``, ``F1``, ``F2``, and ``F3``. ------------------------------------- A ``dump`` statement prints the input string to standard error -output. It is intended for debugging purpose. +output. It is intended for debugging purposes. * At top level, the message is printed immediately. @@ -1727,7 +1727,7 @@ and non-0 as true. ``!div(``\ *a*\ ``,`` *b*\ ``)`` This operator performs signed division of *a* by *b*, and produces the quotient. - Division by 0 produces an error. Division of INT64_MIN by -1 produces an error. + Division by 0 produces an error. Division of ``INT64_MIN`` by -1 produces an error. ``!empty(``\ *a*\ ``)`` This operator produces 1 if the string, list, or DAG *a* is empty; 0 otherwise. @@ -1914,7 +1914,7 @@ and non-0 as true. 
``!or(``\ *a*\ ``,`` *b*\ ``, ...)`` This operator does a bitwise OR on *a*, *b*, etc., and produces the result. A logical OR can be performed if all the arguments are either - 0 or 1. This operator is short-circuit to -1 (all ones) the left-most + 0 or 1. This operator is short-circuit to -1 (all ones) when the left-most operand is -1. ``!range([``\ *start*\ ``,]`` *end*\ ``[,``\ *step*\ ``])`` @@ -1937,7 +1937,7 @@ and non-0 as true. Equivalent to ``!range(0, !size(list))``. ``!repr(``\ *value*\ ``)`` - Represents *value* as a string. String format for the value is not + Represents *value* as a string. The string format for the value is not guaranteed to be stable. Intended for debugging purposes only. ``!setdagarg(``\ *dag*\ ``,``\ *key*\ ``,``\ *arg*\ ``)`` diff --git a/llvm/include/llvm/ADT/BitVector.h b/llvm/include/llvm/ADT/BitVector.h index 83350e6..9e81a4b 100644 --- a/llvm/include/llvm/ADT/BitVector.h +++ b/llvm/include/llvm/ADT/BitVector.h @@ -570,10 +570,7 @@ public: template <class F, class... ArgTys> static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg, ArgTys const &...Args) { - assert(llvm::all_of( - std::initializer_list<unsigned>{Args.size()...}, - [&Arg](auto const &BV) { return Arg.size() == BV; }) && - "consistent sizes"); + assert(((Arg.size() == Args.size()) && ...) && "consistent sizes"); Out.resize(Arg.size()); for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I) Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...); diff --git a/llvm/include/llvm/ADT/ConcurrentHashtable.h b/llvm/include/llvm/ADT/ConcurrentHashtable.h index 6de194d..6a943c5 100644 --- a/llvm/include/llvm/ADT/ConcurrentHashtable.h +++ b/llvm/include/llvm/ADT/ConcurrentHashtable.h @@ -253,9 +253,8 @@ public: OS << "\nOverall number of entries = " << OverallNumberOfEntries; OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets; - for (auto &BucketSize : BucketSizesMap) - OS << "\n Number of buckets with size " << BucketSize.first << ": " - << BucketSize.second; + for (auto [Size, Count] : BucketSizesMap) + OS << "\n Number of buckets with size " << Size << ": " << Count; std::stringstream stream; stream << std::fixed << std::setprecision(2) diff --git a/llvm/include/llvm/ADT/DirectedGraph.h b/llvm/include/llvm/ADT/DirectedGraph.h index 83c0bea..fb6b180 100644 --- a/llvm/include/llvm/ADT/DirectedGraph.h +++ b/llvm/include/llvm/ADT/DirectedGraph.h @@ -181,16 +181,6 @@ public: DirectedGraph() = default; explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); } - DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {} - DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {} - DGraphType &operator=(const DGraphType &G) { - Nodes = G.Nodes; - return *this; - } - DGraphType &operator=(const DGraphType &&G) { - Nodes = std::move(G.Nodes); - return *this; - } const_iterator begin() const { return Nodes.begin(); } const_iterator end() const { return Nodes.end(); } diff --git a/llvm/include/llvm/Analysis/IR2Vec.h b/llvm/include/llvm/Analysis/IR2Vec.h index b7c3015..ed43f19 100644 --- a/llvm/include/llvm/Analysis/IR2Vec.h +++ b/llvm/include/llvm/Analysis/IR2Vec.h @@ -210,6 +210,13 @@ public: const_iterator end() const { return const_iterator(this, getNumSections(), 0); } + + using VocabMap = std::map<std::string, Embedding>; + /// Parse a vocabulary section from JSON and populate the target vocabulary + /// map. 
+ static Error parseVocabSection(StringRef Key, + const json::Value &ParsedVocabValue, + VocabMap &TargetVocab, unsigned &Dim); }; /// Class for storing and accessing the IR2Vec vocabulary. @@ -600,8 +607,6 @@ class IR2VecVocabAnalysis : public AnalysisInfoMixin<IR2VecVocabAnalysis> { Error readVocabulary(VocabMap &OpcVocab, VocabMap &TypeVocab, VocabMap &ArgVocab); - Error parseVocabSection(StringRef Key, const json::Value &ParsedVocabValue, - VocabMap &TargetVocab, unsigned &Dim); void generateVocabStorage(VocabMap &OpcVocab, VocabMap &TypeVocab, VocabMap &ArgVocab); void emitError(Error Err, LLVMContext &Ctx); diff --git a/llvm/include/llvm/Analysis/MemoryProfileInfo.h b/llvm/include/llvm/Analysis/MemoryProfileInfo.h index be690a4..571caf9 100644 --- a/llvm/include/llvm/Analysis/MemoryProfileInfo.h +++ b/llvm/include/llvm/Analysis/MemoryProfileInfo.h @@ -59,14 +59,6 @@ LLVM_ABI std::string getAllocTypeAttributeString(AllocationType Type); /// True if the AllocTypes bitmask contains just a single type. LLVM_ABI bool hasSingleAllocType(uint8_t AllocTypes); -/// Removes any existing "ambiguous" memprof attribute. Called before we apply a -/// specific allocation type such as "cold", "notcold", or "hot". -LLVM_ABI void removeAnyExistingAmbiguousAttribute(CallBase *CB); - -/// Adds an "ambiguous" memprof attribute to call with a matched allocation -/// profile but that we haven't yet been able to disambiguate. -LLVM_ABI void addAmbiguousAttribute(CallBase *CB); - /// Class to build a trie of call stack contexts for a particular profiled /// allocation call, along with their associated allocation types. /// The allocation will be at the root of the trie, which is then used to diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h index 15ff129..af218ba 100644 --- a/llvm/include/llvm/Analysis/ValueTracking.h +++ b/llvm/include/llvm/Analysis/ValueTracking.h @@ -613,6 +613,12 @@ LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const DominatorTree *DT = nullptr, bool AllowEphemerals = false); +/// Returns true, if no instruction between \p Assume and \p CtxI may free +/// memory and the function is marked as NoSync. The latter ensures the current +/// function cannot arrange for another thread to free on its behalf. +LLVM_ABI bool willNotFreeBetween(const Instruction *Assume, + const Instruction *CtxI); + enum class OverflowResult { /// Always overflows in the direction of signed/unsigned min value. 
AlwaysOverflowsLow, diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h index b37c677..50ce931 100644 --- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h +++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h @@ -501,8 +501,12 @@ public: StackID == TargetStackID::ScalablePredicateVector; } - bool isScalableStackID(int ObjectIdx) const { + bool hasScalableStackID(int ObjectIdx) const { uint8_t StackID = getStackID(ObjectIdx); + return isScalableStackID(StackID); + } + + bool isScalableStackID(uint8_t StackID) const { return StackID == TargetStackID::ScalableVector || StackID == TargetStackID::ScalablePredicateVector; } diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h index bf133f0..822245f 100644 --- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h +++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h @@ -109,10 +109,15 @@ public: return MC->contains(Reg1.asMCReg(), Reg2.asMCReg()); } - /// Return the cost of copying a value between two registers in this class. - /// A negative number means the register class is very expensive - /// to copy e.g. status flag register classes. - int getCopyCost() const { return MC->getCopyCost(); } + /// Return the cost of copying a value between two registers in this class. If + /// this is the maximum value, the register may be impossible to copy. + uint8_t getCopyCost() const { return MC->getCopyCost(); } + + /// \return true if register class is very expensive to copy e.g. status flag + /// register classes. + bool expensiveOrImpossibleToCopy() const { + return MC->getCopyCost() == std::numeric_limits<uint8_t>::max(); + } /// Return true if this register class may be used to create virtual /// registers. diff --git a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h index 52af205..ffe0b50 100644 --- a/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h +++ b/llvm/include/llvm/DebugInfo/DWARF/LowLevel/DWARFDataExtractorSimple.h @@ -179,6 +179,7 @@ public: class DWARFDataExtractorSimple : public DWARFDataExtractorBase<DWARFDataExtractorSimple> { +public: using DWARFDataExtractorBase::DWARFDataExtractorBase; LLVM_ABI uint64_t getRelocatedValueImpl(uint32_t Size, uint64_t *Off, diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index 38f95a1..bba0d6e 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -1333,6 +1333,9 @@ def OMP_Tile : Directive<[Spelling<"tile">]> { let allowedOnceClauses = [ VersionedClause<OMPC_Sizes, 51>, ]; + let requiredClauses = [ + VersionedClause<OMPC_Sizes, 51>, + ]; let association = AS_Loop; let category = CA_Executable; } diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h index eb0440f..0622bfa 100644 --- a/llvm/include/llvm/IR/IntrinsicInst.h +++ b/llvm/include/llvm/IR/IntrinsicInst.h @@ -810,6 +810,26 @@ public: /// Whether the intrinsic is signed or unsigned. bool isSigned() const { return isSigned(getIntrinsicID()); }; + /// Whether the intrinsic is a smin or umin. + static bool isMin(Intrinsic::ID ID) { + switch (ID) { + case Intrinsic::umin: + case Intrinsic::smin: + return true; + case Intrinsic::umax: + case Intrinsic::smax: + return false; + default: + llvm_unreachable("Invalid intrinsic"); + } + } + + /// Whether the intrinsic is a smin or a umin. 
+ bool isMin() const { return isMin(getIntrinsicID()); } + + /// Whether the intrinsic is a smax or a umax. + bool isMax() const { return !isMin(getIntrinsicID()); } + /// Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, /// so there is a certain threshold value, upon reaching which, /// their value can no longer change. Return said threshold. diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h index 6168e24..2e31fe5 100644 --- a/llvm/include/llvm/IR/PatternMatch.h +++ b/llvm/include/llvm/IR/PatternMatch.h @@ -2773,6 +2773,14 @@ m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2, return m_Intrinsic<Intrinsic::masked_load>(Op0, Op1, Op2, Op3); } +/// Matches MaskedStore Intrinsic. +template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3> +inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty +m_MaskedStore(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2, + const Opnd3 &Op3) { + return m_Intrinsic<Intrinsic::masked_store>(Op0, Op1, Op2, Op3); +} + /// Matches MaskedGather Intrinsic. template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3> inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty diff --git a/llvm/include/llvm/MC/MCRegisterInfo.h b/llvm/include/llvm/MC/MCRegisterInfo.h index aad3792..e6fc707 100644 --- a/llvm/include/llvm/MC/MCRegisterInfo.h +++ b/llvm/include/llvm/MC/MCRegisterInfo.h @@ -45,7 +45,7 @@ public: const uint16_t RegSetSize; const uint16_t ID; const uint16_t RegSizeInBits; - const int8_t CopyCost; + const uint8_t CopyCost; const bool Allocatable; const bool BaseClass; @@ -94,7 +94,7 @@ public: /// getCopyCost - Return the cost of copying a value between two registers in /// this class. A negative number means the register class is very expensive /// to copy e.g. status flag register classes. - int getCopyCost() const { return CopyCost; } + uint8_t getCopyCost() const { return CopyCost; } /// isAllocatable - Return true if this register class may be used to create /// virtual registers. diff --git a/llvm/include/llvm/Transforms/Scalar/GVN.h b/llvm/include/llvm/Transforms/Scalar/GVN.h index 2454149..74a4d6c 100644 --- a/llvm/include/llvm/Transforms/Scalar/GVN.h +++ b/llvm/include/llvm/Transforms/Scalar/GVN.h @@ -56,6 +56,7 @@ class OptimizationRemarkEmitter; class PHINode; class TargetLibraryInfo; class Value; +class IntrinsicInst; /// A private "module" namespace for types and utilities used by GVN. These /// are implementation details and should not be used by clients. namespace LLVM_LIBRARY_VISIBILITY_NAMESPACE gvn { @@ -349,6 +350,7 @@ private: // Helper functions of redundant load elimination. 
bool processLoad(LoadInst *L); + bool processMaskedLoad(IntrinsicInst *I); bool processNonLocalLoad(LoadInst *L); bool processAssumeIntrinsic(AssumeInst *II); diff --git a/llvm/lib/Analysis/HashRecognize.cpp b/llvm/lib/Analysis/HashRecognize.cpp index 5d7ee1f..4529123 100644 --- a/llvm/lib/Analysis/HashRecognize.cpp +++ b/llvm/lib/Analysis/HashRecognize.cpp @@ -97,7 +97,7 @@ static bool containsUnreachable(const Loop &L, } } } - return std::distance(Latch->begin(), Latch->end()) != Visited.size(); + return Latch->size() != Visited.size(); } /// A structure that can hold either a Simple Recurrence or a Conditional diff --git a/llvm/lib/Analysis/IR2Vec.cpp b/llvm/lib/Analysis/IR2Vec.cpp index af30422..295b6d3 100644 --- a/llvm/lib/Analysis/IR2Vec.cpp +++ b/llvm/lib/Analysis/IR2Vec.cpp @@ -330,6 +330,43 @@ bool VocabStorage::const_iterator::operator!=( return !(*this == Other); } +Error VocabStorage::parseVocabSection(StringRef Key, + const json::Value &ParsedVocabValue, + VocabMap &TargetVocab, unsigned &Dim) { + json::Path::Root Path(""); + const json::Object *RootObj = ParsedVocabValue.getAsObject(); + if (!RootObj) + return createStringError(errc::invalid_argument, + "JSON root is not an object"); + + const json::Value *SectionValue = RootObj->get(Key); + if (!SectionValue) + return createStringError(errc::invalid_argument, + "Missing '" + std::string(Key) + + "' section in vocabulary file"); + if (!json::fromJSON(*SectionValue, TargetVocab, Path)) + return createStringError(errc::illegal_byte_sequence, + "Unable to parse '" + std::string(Key) + + "' section from vocabulary"); + + Dim = TargetVocab.begin()->second.size(); + if (Dim == 0) + return createStringError(errc::illegal_byte_sequence, + "Dimension of '" + std::string(Key) + + "' section of the vocabulary is zero"); + + if (!std::all_of(TargetVocab.begin(), TargetVocab.end(), + [Dim](const std::pair<StringRef, Embedding> &Entry) { + return Entry.second.size() == Dim; + })) + return createStringError( + errc::illegal_byte_sequence, + "All vectors in the '" + std::string(Key) + + "' section of the vocabulary are not of the same dimension"); + + return Error::success(); +} + // ==----------------------------------------------------------------------===// // Vocabulary //===----------------------------------------------------------------------===// @@ -460,43 +497,6 @@ VocabStorage Vocabulary::createDummyVocabForTest(unsigned Dim) { // IR2VecVocabAnalysis //===----------------------------------------------------------------------===// -Error IR2VecVocabAnalysis::parseVocabSection( - StringRef Key, const json::Value &ParsedVocabValue, VocabMap &TargetVocab, - unsigned &Dim) { - json::Path::Root Path(""); - const json::Object *RootObj = ParsedVocabValue.getAsObject(); - if (!RootObj) - return createStringError(errc::invalid_argument, - "JSON root is not an object"); - - const json::Value *SectionValue = RootObj->get(Key); - if (!SectionValue) - return createStringError(errc::invalid_argument, - "Missing '" + std::string(Key) + - "' section in vocabulary file"); - if (!json::fromJSON(*SectionValue, TargetVocab, Path)) - return createStringError(errc::illegal_byte_sequence, - "Unable to parse '" + std::string(Key) + - "' section from vocabulary"); - - Dim = TargetVocab.begin()->second.size(); - if (Dim == 0) - return createStringError(errc::illegal_byte_sequence, - "Dimension of '" + std::string(Key) + - "' section of the vocabulary is zero"); - - if (!std::all_of(TargetVocab.begin(), TargetVocab.end(), - [Dim](const std::pair<StringRef, 
Embedding> &Entry) { - return Entry.second.size() == Dim; - })) - return createStringError( - errc::illegal_byte_sequence, - "All vectors in the '" + std::string(Key) + - "' section of the vocabulary are not of the same dimension"); - - return Error::success(); -} - // FIXME: Make this optional. We can avoid file reads // by auto-generating a default vocabulary during the build time. Error IR2VecVocabAnalysis::readVocabulary(VocabMap &OpcVocab, @@ -513,16 +513,16 @@ Error IR2VecVocabAnalysis::readVocabulary(VocabMap &OpcVocab, return ParsedVocabValue.takeError(); unsigned OpcodeDim = 0, TypeDim = 0, ArgDim = 0; - if (auto Err = - parseVocabSection("Opcodes", *ParsedVocabValue, OpcVocab, OpcodeDim)) + if (auto Err = VocabStorage::parseVocabSection("Opcodes", *ParsedVocabValue, + OpcVocab, OpcodeDim)) return Err; - if (auto Err = - parseVocabSection("Types", *ParsedVocabValue, TypeVocab, TypeDim)) + if (auto Err = VocabStorage::parseVocabSection("Types", *ParsedVocabValue, + TypeVocab, TypeDim)) return Err; - if (auto Err = - parseVocabSection("Arguments", *ParsedVocabValue, ArgVocab, ArgDim)) + if (auto Err = VocabStorage::parseVocabSection("Arguments", *ParsedVocabValue, + ArgVocab, ArgDim)) return Err; if (!(OpcodeDim == TypeDim && TypeDim == ArgDim)) diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index 47dccde..7adb25d 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -233,19 +233,25 @@ static bool evaluatePtrAddRecAtMaxBTCWillNotWrap( const SCEV *DerefBytesSCEV = SE.getConstant(WiderTy, DerefBytes); // Check if we have a suitable dereferencable assumption we can use. - if (!StartPtrV->canBeFreed()) { - Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); - if (BasicBlock *LoopPred = L->getLoopPredecessor()) { - if (isa<BranchInst>(LoopPred->getTerminator())) - CtxI = LoopPred->getTerminator(); - } - - RetainedKnowledge DerefRK = getKnowledgeValidInContext( - StartPtrV, {Attribute::Dereferenceable}, *AC, CtxI, DT); - if (DerefRK) { - DerefBytesSCEV = - SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue)); - } + Instruction *CtxI = &*L->getHeader()->getFirstNonPHIIt(); + if (BasicBlock *LoopPred = L->getLoopPredecessor()) { + if (isa<BranchInst>(LoopPred->getTerminator())) + CtxI = LoopPred->getTerminator(); + } + RetainedKnowledge DerefRK; + getKnowledgeForValue(StartPtrV, {Attribute::Dereferenceable}, *AC, + [&](RetainedKnowledge RK, Instruction *Assume, auto) { + if (!isValidAssumeForContext(Assume, CtxI, DT)) + return false; + if (StartPtrV->canBeFreed() && + !willNotFreeBetween(Assume, CtxI)) + return false; + DerefRK = std::max(DerefRK, RK); + return true; + }); + if (DerefRK) { + DerefBytesSCEV = + SE.getUMaxExpr(DerefBytesSCEV, SE.getSCEV(DerefRK.IRArgValue)); } if (DerefBytesSCEV->isZero()) diff --git a/llvm/lib/Analysis/MemoryProfileInfo.cpp b/llvm/lib/Analysis/MemoryProfileInfo.cpp index 11602d2..0c1f8db 100644 --- a/llvm/lib/Analysis/MemoryProfileInfo.cpp +++ b/llvm/lib/Analysis/MemoryProfileInfo.cpp @@ -125,24 +125,6 @@ bool llvm::memprof::hasSingleAllocType(uint8_t AllocTypes) { return NumAllocTypes == 1; } -void llvm::memprof::removeAnyExistingAmbiguousAttribute(CallBase *CB) { - if (!CB->hasFnAttr("memprof")) - return; - assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous"); - CB->removeFnAttr("memprof"); -} - -void llvm::memprof::addAmbiguousAttribute(CallBase *CB) { - // We may have an existing ambiguous attribute if we are reanalyzing - // 
after inlining. - if (CB->hasFnAttr("memprof")) { - assert(CB->getFnAttr("memprof").getValueAsString() == "ambiguous"); - } else { - auto A = llvm::Attribute::get(CB->getContext(), "memprof", "ambiguous"); - CB->addFnAttr(A); - } -} - void CallStackTrie::addCallStack( AllocationType AllocType, ArrayRef<uint64_t> StackIds, std::vector<ContextTotalSize> ContextSizeInfo) { @@ -488,9 +470,6 @@ void CallStackTrie::addSingleAllocTypeAttribute(CallBase *CI, AllocationType AT, StringRef Descriptor) { auto AllocTypeString = getAllocTypeAttributeString(AT); auto A = llvm::Attribute::get(CI->getContext(), "memprof", AllocTypeString); - // After inlining we may be able to convert an existing ambiguous allocation - // to an unambiguous one. - removeAnyExistingAmbiguousAttribute(CI); CI->addFnAttr(A); if (MemProfReportHintedSizes) { std::vector<ContextTotalSize> ContextSizeInfo; @@ -550,7 +529,6 @@ bool CallStackTrie::buildAndAttachMIBMetadata(CallBase *CI) { assert(MIBCallStack.size() == 1 && "Should only be left with Alloc's location in stack"); CI->setMetadata(LLVMContext::MD_memprof, MDNode::get(Ctx, MIBNodes)); - addAmbiguousAttribute(CI); return true; } // If there exists corner case that CallStackTrie has one chain to leaf diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 09a8fbe..1eda7a7 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -89,6 +89,9 @@ using namespace llvm::PatternMatch; static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20)); +/// Maximum number of instructions to check between assume and context +/// instruction. +static constexpr unsigned MaxInstrsToCheckForFree = 16; /// Returns the bitwidth of the given scalar or pointer type. For vector types, /// returns the element type's bitwidth. @@ -561,6 +564,29 @@ bool llvm::isValidAssumeForContext(const Instruction *Inv, return false; } +bool llvm::willNotFreeBetween(const Instruction *Assume, + const Instruction *CtxI) { + if (CtxI->getParent() != Assume->getParent() || !Assume->comesBefore(CtxI)) + return false; + // Make sure the current function cannot arrange for another thread to free on + // its behalf. + if (!CtxI->getFunction()->hasNoSync()) + return false; + + // Check if there are any calls between the assume and CtxI that may + // free memory. + for (const auto &[Idx, I] : + enumerate(make_range(Assume->getIterator(), CtxI->getIterator()))) { + // Limit number of instructions to walk. + if (Idx > MaxInstrsToCheckForFree) + return false; + if (const auto *CB = dyn_cast<CallBase>(&I)) + if (!CB->hasFnAttr(Attribute::NoFree)) + return false; + } + return true; +} + // TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but // we still have enough information about `RHS` to conclude non-zero. For // example Pred=EQ, RHS=isKnownNonZero. 
cmpExcludesZero is called in loops diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp index 1703b27..bc0bb34 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp @@ -618,12 +618,15 @@ bool DwarfExpression::addExpression( case dwarf::DW_OP_dup: case dwarf::DW_OP_push_object_address: case dwarf::DW_OP_over: + case dwarf::DW_OP_rot: case dwarf::DW_OP_eq: case dwarf::DW_OP_ne: case dwarf::DW_OP_gt: case dwarf::DW_OP_ge: case dwarf::DW_OP_lt: case dwarf::DW_OP_le: + case dwarf::DW_OP_neg: + case dwarf::DW_OP_abs: emitOp(OpNum); break; case dwarf::DW_OP_deref: diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp index 62fb5eb..3cfe7cc 100644 --- a/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -1889,11 +1889,12 @@ DIE &DwarfUnit::constructMemberDIE(DIE &Buffer, const DIDerivedType *DT) { bool IsBitfield = DT->isBitField(); // Handle the size. - if (auto *Var = dyn_cast_or_null<DIVariable>(DT->getRawSizeInBits())) { + if (DT->getRawSizeInBits() == nullptr) { + // No size, just ignore. + } else if (auto *Var = dyn_cast<DIVariable>(DT->getRawSizeInBits())) { if (auto *VarDIE = getDIE(Var)) addDIEEntry(MemberDie, dwarf::DW_AT_bit_size, *VarDIE); - } else if (auto *Exp = - dyn_cast_or_null<DIExpression>(DT->getRawSizeInBits())) { + } else if (auto *Exp = dyn_cast<DIExpression>(DT->getRawSizeInBits())) { DIELoc *Loc = new (DIEValueAllocator) DIELoc; DIEDwarfExpression DwarfExpr(*Asm, getCU(), *Loc); DwarfExpr.setMemoryLocationKind(); diff --git a/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/llvm/lib/CodeGen/MachineRegisterInfo.cpp index abb3f3e..ae284f3 100644 --- a/llvm/lib/CodeGen/MachineRegisterInfo.cpp +++ b/llvm/lib/CodeGen/MachineRegisterInfo.cpp @@ -83,8 +83,6 @@ constrainRegClass(MachineRegisterInfo &MRI, Register Reg, const TargetRegisterClass *MachineRegisterInfo::constrainRegClass( Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs) { - if (Reg.isPhysical()) - return nullptr; return ::constrainRegClass(*this, Reg, getRegClass(Reg), RC, MinNumRegs); } diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 11bc64c..bb10cf6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -160,7 +160,7 @@ void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg, // If all uses are reading from the src physical register and copying the // register is either impossible or very expensive, then don't create a copy. - if (MatchReg && SrcRC->getCopyCost() < 0) { + if (MatchReg && SrcRC->expensiveOrImpossibleToCopy()) { VRBase = SrcReg; } else { // Create the reg, emit the copy. diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 31e7855..7902229 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -136,7 +136,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, if (PhysReg) { const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, Def->getSimpleValueType(ResNo)); - Cost = RC->getCopyCost(); + Cost = RC->expensiveOrImpossibleToCopy() ? 
-1 : RC->getCopyCost(); } } diff --git a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp index ec75dc3..64e5cd5 100644 --- a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp +++ b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp @@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis { : Slot(Idx), Size(MFI.getObjectSize(Idx)), Align(MFI.getObjectAlign(Idx).value()), Offset(Offset), SlotTy(Invalid), Scalable(false) { - Scalable = MFI.isScalableStackID(Idx); + Scalable = MFI.hasScalableStackID(Idx); if (MFI.isSpillSlotObjectIndex(Idx)) SlotTy = SlotType::Spill; else if (MFI.isFixedObjectIndex(Idx)) diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp index 23b72da..6e316f1 100644 --- a/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/JITLink.cpp @@ -280,6 +280,9 @@ std::vector<Block *> LinkGraph::splitBlockImpl(std::vector<Block *> Blocks, void LinkGraph::dump(raw_ostream &OS) { DenseMap<Block *, std::vector<Symbol *>> BlockSymbols; + OS << "LinkGraph \"" << getName() + << "\" (triple = " << getTargetTriple().str() << ")\n"; + // Map from blocks to the symbols pointing at them. for (auto *Sym : defined_symbols()) BlockSymbols[&Sym->getBlock()].push_back(Sym); diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp index 584b9f0..17050b0 100644 --- a/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp +++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp @@ -21,23 +21,21 @@ JITLinkerBase::~JITLinkerBase() = default; void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) { - LLVM_DEBUG({ - dbgs() << "Starting link phase 1 for graph " << G->getName() << "\n"; - }); + LLVM_DEBUG(dbgs() << "Starting link phase 1\n"); // Prune and optimize the graph. if (auto Err = runPasses(Passes.PrePrunePasses)) return Ctx->notifyFailed(std::move(Err)); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n"; + dbgs() << "Link graph pre-pruning:\n"; G->dump(dbgs()); }); prune(*G); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n"; + dbgs() << "Link graph post-pruning:\n"; G->dump(dbgs()); }); @@ -67,14 +65,15 @@ void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) { void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self, AllocResult AR) { + LLVM_DEBUG(dbgs() << "Starting link phase 2\n"); + if (AR) Alloc = std::move(*AR); else return Ctx->notifyFailed(AR.takeError()); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() - << "\" before post-allocation passes:\n"; + dbgs() << "Link graph before post-allocation passes:\n"; G->dump(dbgs()); }); @@ -131,9 +130,7 @@ void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self, void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Expected<AsyncLookupResult> LR) { - LLVM_DEBUG({ - dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n"; - }); + LLVM_DEBUG(dbgs() << "Starting link phase 3\n"); // If the lookup failed, bail out. 
if (!LR) @@ -143,8 +140,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, applyLookupResult(*LR); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() - << "\" before pre-fixup passes:\n"; + dbgs() << "Link graph before pre-fixup passes:\n"; G->dump(dbgs()); }); @@ -152,7 +148,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, return abandonAllocAndBailOut(std::move(Self), std::move(Err)); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n"; + dbgs() << "Link graph before copy-and-fixup:\n"; G->dump(dbgs()); }); @@ -161,7 +157,7 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, return abandonAllocAndBailOut(std::move(Self), std::move(Err)); LLVM_DEBUG({ - dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n"; + dbgs() << "Link graph after copy-and-fixup:\n"; G->dump(dbgs()); }); @@ -186,16 +182,14 @@ void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, void JITLinkerBase::linkPhase4(std::unique_ptr<JITLinkerBase> Self, FinalizeResult FR) { - LLVM_DEBUG({ - dbgs() << "Starting link phase 4 for graph " << G->getName() << "\n"; - }); + LLVM_DEBUG(dbgs() << "Starting link phase 4\n"); if (!FR) return Ctx->notifyFailed(FR.takeError()); Ctx->notifyFinalized(std::move(*FR)); - LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; }); + LLVM_DEBUG({ dbgs() << "Link complete\n"; }); } Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) { diff --git a/llvm/lib/IR/Assumptions.cpp b/llvm/lib/IR/Assumptions.cpp index f8bbcb3..3397f0e 100644 --- a/llvm/lib/IR/Assumptions.cpp +++ b/llvm/lib/IR/Assumptions.cpp @@ -20,9 +20,8 @@ using namespace llvm; -namespace { -bool hasAssumption(const Attribute &A, - const KnownAssumptionString &AssumptionStr) { +static bool hasAssumption(const Attribute &A, + const KnownAssumptionString &AssumptionStr) { if (!A.isValid()) return false; assert(A.isStringAttribute() && "Expected a string attribute!"); @@ -33,7 +32,7 @@ bool hasAssumption(const Attribute &A, return llvm::is_contained(Strings, AssumptionStr); } -DenseSet<StringRef> getAssumptions(const Attribute &A) { +static DenseSet<StringRef> getAssumptions(const Attribute &A) { if (!A.isValid()) return DenseSet<StringRef>(); assert(A.isStringAttribute() && "Expected a string attribute!"); @@ -47,8 +46,8 @@ DenseSet<StringRef> getAssumptions(const Attribute &A) { } template <typename AttrSite> -bool addAssumptionsImpl(AttrSite &Site, - const DenseSet<StringRef> &Assumptions) { +static bool addAssumptionsImpl(AttrSite &Site, + const DenseSet<StringRef> &Assumptions) { if (Assumptions.empty()) return false; @@ -64,7 +63,6 @@ bool addAssumptionsImpl(AttrSite &Site, return true; } -} // namespace bool llvm::hasAssumption(const Function &F, const KnownAssumptionString &AssumptionStr) { diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp index 1ededb9e7..77d044b 100644 --- a/llvm/lib/IR/DebugInfoMetadata.cpp +++ b/llvm/lib/IR/DebugInfoMetadata.cpp @@ -1768,6 +1768,7 @@ bool DIExpression::isValid() const { case dwarf::DW_OP_bregx: case dwarf::DW_OP_push_object_address: case dwarf::DW_OP_over: + case dwarf::DW_OP_rot: case dwarf::DW_OP_consts: case dwarf::DW_OP_eq: case dwarf::DW_OP_ne: @@ -1775,6 +1776,8 @@ bool DIExpression::isValid() const { case dwarf::DW_OP_ge: case dwarf::DW_OP_lt: case dwarf::DW_OP_le: + case dwarf::DW_OP_neg: + case dwarf::DW_OP_abs: break; } } diff --git a/llvm/lib/IR/DiagnosticHandler.cpp 
b/llvm/lib/IR/DiagnosticHandler.cpp index 683eade..eb2fe3b 100644 --- a/llvm/lib/IR/DiagnosticHandler.cpp +++ b/llvm/lib/IR/DiagnosticHandler.cpp @@ -36,6 +36,7 @@ struct PassRemarksOpt { } } }; +} // namespace static PassRemarksOpt PassRemarksPassedOptLoc; static PassRemarksOpt PassRemarksMissedOptLoc; @@ -66,7 +67,6 @@ static cl::opt<PassRemarksOpt, true, cl::parser<std::string>> "Enable optimization analysis remarks from passes whose name match " "the given regular expression"), cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired); -} bool DiagnosticHandler::isAnalysisRemarkEnabled(StringRef PassName) const { return (PassRemarksAnalysisOptLoc.Pattern && diff --git a/llvm/lib/IR/ModuleSummaryIndex.cpp b/llvm/lib/IR/ModuleSummaryIndex.cpp index d9024b0..dc55b63 100644 --- a/llvm/lib/IR/ModuleSummaryIndex.cpp +++ b/llvm/lib/IR/ModuleSummaryIndex.cpp @@ -409,7 +409,7 @@ struct Edge { GlobalValue::GUID Src; GlobalValue::GUID Dst; }; -} +} // namespace void Attributes::add(const Twine &Name, const Twine &Value, const Twine &Comment) { diff --git a/llvm/lib/IR/PassInstrumentation.cpp b/llvm/lib/IR/PassInstrumentation.cpp index 70bbe8f..52aad8f 100644 --- a/llvm/lib/IR/PassInstrumentation.cpp +++ b/llvm/lib/IR/PassInstrumentation.cpp @@ -15,7 +15,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/IR/PassManager.h" -namespace llvm { +using namespace llvm; template struct LLVM_EXPORT_TEMPLATE Any::TypeId<const Module *>; template struct LLVM_EXPORT_TEMPLATE Any::TypeId<const Function *>; @@ -42,7 +42,8 @@ PassInstrumentationCallbacks::getPassNameForClassName(StringRef ClassName) { AnalysisKey PassInstrumentationAnalysis::Key; -bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials) { +bool llvm::isSpecialPass(StringRef PassID, + const std::vector<StringRef> &Specials) { size_t Pos = PassID.find('<'); StringRef Prefix = PassID; if (Pos != StringRef::npos) @@ -50,5 +51,3 @@ bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials) { return any_of(Specials, [Prefix](StringRef S) { return Prefix.ends_with(S); }); } - -} // namespace llvm diff --git a/llvm/lib/IR/ProfDataUtils.cpp b/llvm/lib/IR/ProfDataUtils.cpp index edeca97..fc2be51 100644 --- a/llvm/lib/IR/ProfDataUtils.cpp +++ b/llvm/lib/IR/ProfDataUtils.cpp @@ -24,8 +24,6 @@ using namespace llvm; -namespace { - // MD_prof nodes have the following layout // // In general: @@ -41,14 +39,15 @@ namespace { // correctly, and can change the behavior in the future if the layout changes // the minimum number of operands for MD_prof nodes with branch weights -constexpr unsigned MinBWOps = 3; +static constexpr unsigned MinBWOps = 3; // the minimum number of operands for MD_prof nodes with value profiles -constexpr unsigned MinVPOps = 5; +static constexpr unsigned MinVPOps = 5; // We may want to add support for other MD_prof types, so provide an abstraction // for checking the metadata type. -bool isTargetMD(const MDNode *ProfData, const char *Name, unsigned MinOps) { +static bool isTargetMD(const MDNode *ProfData, const char *Name, + unsigned MinOps) { // TODO: This routine may be simplified if MD_prof used an enum instead of a // string to differentiate the types of MD_prof nodes. 
if (!ProfData || !Name || MinOps < 2) @@ -101,14 +100,11 @@ static SmallVector<uint32_t> fitWeights(ArrayRef<uint64_t> Weights) { return Ret; } -} // namespace - -namespace llvm { -cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights", +static cl::opt<bool> ElideAllZeroBranchWeights("elide-all-zero-branch-weights", #if defined(LLVM_ENABLE_PROFCHECK) - cl::init(false) + cl::init(false) #else - cl::init(true) + cl::init(true) #endif ); const char *MDProfLabels::BranchWeights = "branch_weights"; @@ -118,21 +114,21 @@ const char *MDProfLabels::FunctionEntryCount = "function_entry_count"; const char *MDProfLabels::SyntheticFunctionEntryCount = "synthetic_function_entry_count"; const char *MDProfLabels::UnknownBranchWeightsMarker = "unknown"; -const char *LLVMLoopEstimatedTripCount = "llvm.loop.estimated_trip_count"; +const char *llvm::LLVMLoopEstimatedTripCount = "llvm.loop.estimated_trip_count"; -bool hasProfMD(const Instruction &I) { +bool llvm::hasProfMD(const Instruction &I) { return I.hasMetadata(LLVMContext::MD_prof); } -bool isBranchWeightMD(const MDNode *ProfileData) { +bool llvm::isBranchWeightMD(const MDNode *ProfileData) { return isTargetMD(ProfileData, MDProfLabels::BranchWeights, MinBWOps); } -bool isValueProfileMD(const MDNode *ProfileData) { +bool llvm::isValueProfileMD(const MDNode *ProfileData) { return isTargetMD(ProfileData, MDProfLabels::ValueProfile, MinVPOps); } -bool hasBranchWeightMD(const Instruction &I) { +bool llvm::hasBranchWeightMD(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return isBranchWeightMD(ProfileData); } @@ -147,16 +143,16 @@ static bool hasCountTypeMD(const Instruction &I) { return isa<CallBase>(I) && !isBranchWeightMD(ProfileData); } -bool hasValidBranchWeightMD(const Instruction &I) { +bool llvm::hasValidBranchWeightMD(const Instruction &I) { return getValidBranchWeightMDNode(I); } -bool hasBranchWeightOrigin(const Instruction &I) { +bool llvm::hasBranchWeightOrigin(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return hasBranchWeightOrigin(ProfileData); } -bool hasBranchWeightOrigin(const MDNode *ProfileData) { +bool llvm::hasBranchWeightOrigin(const MDNode *ProfileData) { if (!isBranchWeightMD(ProfileData)) return false; auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(1)); @@ -168,54 +164,54 @@ bool hasBranchWeightOrigin(const MDNode *ProfileData) { return ProfDataName != nullptr; } -unsigned getBranchWeightOffset(const MDNode *ProfileData) { +unsigned llvm::getBranchWeightOffset(const MDNode *ProfileData) { return hasBranchWeightOrigin(ProfileData) ? 
2 : 1; } -unsigned getNumBranchWeights(const MDNode &ProfileData) { +unsigned llvm::getNumBranchWeights(const MDNode &ProfileData) { return ProfileData.getNumOperands() - getBranchWeightOffset(&ProfileData); } -MDNode *getBranchWeightMDNode(const Instruction &I) { +MDNode *llvm::getBranchWeightMDNode(const Instruction &I) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); if (!isBranchWeightMD(ProfileData)) return nullptr; return ProfileData; } -MDNode *getValidBranchWeightMDNode(const Instruction &I) { +MDNode *llvm::getValidBranchWeightMDNode(const Instruction &I) { auto *ProfileData = getBranchWeightMDNode(I); if (ProfileData && getNumBranchWeights(*ProfileData) == I.getNumSuccessors()) return ProfileData; return nullptr; } -void extractFromBranchWeightMD32(const MDNode *ProfileData, - SmallVectorImpl<uint32_t> &Weights) { +void llvm::extractFromBranchWeightMD32(const MDNode *ProfileData, + SmallVectorImpl<uint32_t> &Weights) { extractFromBranchWeightMD(ProfileData, Weights); } -void extractFromBranchWeightMD64(const MDNode *ProfileData, - SmallVectorImpl<uint64_t> &Weights) { +void llvm::extractFromBranchWeightMD64(const MDNode *ProfileData, + SmallVectorImpl<uint64_t> &Weights) { extractFromBranchWeightMD(ProfileData, Weights); } -bool extractBranchWeights(const MDNode *ProfileData, - SmallVectorImpl<uint32_t> &Weights) { +bool llvm::extractBranchWeights(const MDNode *ProfileData, + SmallVectorImpl<uint32_t> &Weights) { if (!isBranchWeightMD(ProfileData)) return false; extractFromBranchWeightMD(ProfileData, Weights); return true; } -bool extractBranchWeights(const Instruction &I, - SmallVectorImpl<uint32_t> &Weights) { +bool llvm::extractBranchWeights(const Instruction &I, + SmallVectorImpl<uint32_t> &Weights) { auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); return extractBranchWeights(ProfileData, Weights); } -bool extractBranchWeights(const Instruction &I, uint64_t &TrueVal, - uint64_t &FalseVal) { +bool llvm::extractBranchWeights(const Instruction &I, uint64_t &TrueVal, + uint64_t &FalseVal) { assert((I.getOpcode() == Instruction::Br || I.getOpcode() == Instruction::Select) && "Looking for branch weights on something besides branch, select, or " @@ -234,7 +230,8 @@ bool extractBranchWeights(const Instruction &I, uint64_t &TrueVal, return true; } -bool extractProfTotalWeight(const MDNode *ProfileData, uint64_t &TotalVal) { +bool llvm::extractProfTotalWeight(const MDNode *ProfileData, + uint64_t &TotalVal) { TotalVal = 0; if (!ProfileData) return false; @@ -262,11 +259,12 @@ bool extractProfTotalWeight(const MDNode *ProfileData, uint64_t &TotalVal) { return false; } -bool extractProfTotalWeight(const Instruction &I, uint64_t &TotalVal) { +bool llvm::extractProfTotalWeight(const Instruction &I, uint64_t &TotalVal) { return extractProfTotalWeight(I.getMetadata(LLVMContext::MD_prof), TotalVal); } -void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName) { +void llvm::setExplicitlyUnknownBranchWeights(Instruction &I, + StringRef PassName) { MDBuilder MDB(I.getContext()); I.setMetadata( LLVMContext::MD_prof, @@ -275,14 +273,16 @@ void setExplicitlyUnknownBranchWeights(Instruction &I, StringRef PassName) { MDB.createString(PassName)})); } -void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, Function &F, - StringRef PassName) { +void llvm::setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, + Function &F, + StringRef PassName) { if (std::optional<Function::ProfileCount> EC = F.getEntryCount(); EC && EC->getCount() > 0) 
setExplicitlyUnknownBranchWeights(I, PassName); } -void setExplicitlyUnknownFunctionEntryCount(Function &F, StringRef PassName) { +void llvm::setExplicitlyUnknownFunctionEntryCount(Function &F, + StringRef PassName) { MDBuilder MDB(F.getContext()); F.setMetadata( LLVMContext::MD_prof, @@ -291,21 +291,21 @@ void setExplicitlyUnknownFunctionEntryCount(Function &F, StringRef PassName) { MDB.createString(PassName)})); } -bool isExplicitlyUnknownProfileMetadata(const MDNode &MD) { +bool llvm::isExplicitlyUnknownProfileMetadata(const MDNode &MD) { if (MD.getNumOperands() != 2) return false; return MD.getOperand(0).equalsStr(MDProfLabels::UnknownBranchWeightsMarker); } -bool hasExplicitlyUnknownBranchWeights(const Instruction &I) { +bool llvm::hasExplicitlyUnknownBranchWeights(const Instruction &I) { auto *MD = I.getMetadata(LLVMContext::MD_prof); if (!MD) return false; return isExplicitlyUnknownProfileMetadata(*MD); } -void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, - bool IsExpected, bool ElideAllZero) { +void llvm::setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, + bool IsExpected, bool ElideAllZero) { if ((ElideAllZeroBranchWeights && ElideAllZero) && llvm::all_of(Weights, [](uint32_t V) { return V == 0; })) { I.setMetadata(LLVMContext::MD_prof, nullptr); @@ -317,13 +317,14 @@ void setBranchWeights(Instruction &I, ArrayRef<uint32_t> Weights, I.setMetadata(LLVMContext::MD_prof, BranchWeights); } -void setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, - bool IsExpected, bool ElideAllZero) { +void llvm::setFittedBranchWeights(Instruction &I, ArrayRef<uint64_t> Weights, + bool IsExpected, bool ElideAllZero) { setBranchWeights(I, fitWeights(Weights), IsExpected, ElideAllZero); } -SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights, - std::optional<uint64_t> KnownMaxCount) { +SmallVector<uint32_t> +llvm::downscaleWeights(ArrayRef<uint64_t> Weights, + std::optional<uint64_t> KnownMaxCount) { uint64_t MaxCount = KnownMaxCount.has_value() ? 
KnownMaxCount.value() : *llvm::max_element(Weights); assert(MaxCount > 0 && "Bad max count"); @@ -334,7 +335,7 @@ SmallVector<uint32_t> downscaleWeights(ArrayRef<uint64_t> Weights, return DownscaledWeights; } -void scaleProfData(Instruction &I, uint64_t S, uint64_t T) { +void llvm::scaleProfData(Instruction &I, uint64_t S, uint64_t T) { assert(T != 0 && "Caller should guarantee"); auto *ProfileData = I.getMetadata(LLVMContext::MD_prof); if (ProfileData == nullptr) @@ -387,5 +388,3 @@ void scaleProfData(Instruction &I, uint64_t S, uint64_t T) { } I.setMetadata(LLVMContext::MD_prof, MDNode::get(C, Vals)); } - -} // namespace llvm diff --git a/llvm/lib/IR/SafepointIRVerifier.cpp b/llvm/lib/IR/SafepointIRVerifier.cpp index e54894c..e35b5b3 100644 --- a/llvm/lib/IR/SafepointIRVerifier.cpp +++ b/llvm/lib/IR/SafepointIRVerifier.cpp @@ -196,7 +196,6 @@ protected: static void Verify(const Function &F, const DominatorTree &DT, const CFGDeadness &CD); -namespace llvm { PreservedAnalyses SafepointIRVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { const auto &DT = AM.getResult<DominatorTreeAnalysis>(F); @@ -205,7 +204,6 @@ PreservedAnalyses SafepointIRVerifierPass::run(Function &F, Verify(F, DT, CD); return PreservedAnalyses::all(); } -} // namespace llvm namespace { diff --git a/llvm/lib/IR/VFABIDemangler.cpp b/llvm/lib/IR/VFABIDemangler.cpp index 2de05a5..4fcf436 100644 --- a/llvm/lib/IR/VFABIDemangler.cpp +++ b/llvm/lib/IR/VFABIDemangler.cpp @@ -20,15 +20,16 @@ using namespace llvm; #define DEBUG_TYPE "vfabi-demangler" -namespace { /// Utilities for the Vector Function ABI name parser. +namespace { /// Return types for the parser functions. enum class ParseRet { OK, // Found. None, // Not found. Error // Syntax error. }; +} // namespace /// Extracts the `<isa>` information from the mangled string, and /// sets the `ISA` accordingly. 
If successful, the <isa> token is removed @@ -372,7 +373,6 @@ getScalableECFromSignature(const FunctionType *Signature, const VFISAKind ISA, return std::nullopt; } -} // namespace // Format of the ABI name: // _ZGV<isa><mask><vlen><parameters>_<scalarname>[(<redirection>)] diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index a347609..b775cbb 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -622,6 +622,7 @@ enum PointerStripKind { PSK_InBoundsConstantIndices, PSK_InBounds }; +} // end anonymous namespace template <PointerStripKind StripKind> static void NoopCallback(const Value *) {} @@ -696,7 +697,6 @@ static const Value *stripPointerCastsAndOffsets( return V; } -} // end anonymous namespace const Value *Value::stripPointerCasts() const { return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this); diff --git a/llvm/lib/Object/BuildID.cpp b/llvm/lib/Object/BuildID.cpp index 89d6bc3..d1ee597 100644 --- a/llvm/lib/Object/BuildID.cpp +++ b/llvm/lib/Object/BuildID.cpp @@ -24,6 +24,24 @@ using namespace llvm::object; namespace { template <typename ELFT> BuildIDRef getBuildID(const ELFFile<ELFT> &Obj) { + auto findBuildID = [&Obj](const auto &ShdrOrPhdr, + uint64_t Alignment) -> std::optional<BuildIDRef> { + Error Err = Error::success(); + for (auto N : Obj.notes(ShdrOrPhdr, Err)) + if (N.getType() == ELF::NT_GNU_BUILD_ID && + N.getName() == ELF::ELF_NOTE_GNU) + return N.getDesc(Alignment); + consumeError(std::move(Err)); + return std::nullopt; + }; + + auto Sections = cantFail(Obj.sections()); + for (const auto &S : Sections) { + if (S.sh_type != ELF::SHT_NOTE) + continue; + if (std::optional<BuildIDRef> ShdrRes = findBuildID(S, S.sh_addralign)) + return ShdrRes.value(); + } auto PhdrsOrErr = Obj.program_headers(); if (!PhdrsOrErr) { consumeError(PhdrsOrErr.takeError()); @@ -32,12 +50,8 @@ template <typename ELFT> BuildIDRef getBuildID(const ELFFile<ELFT> &Obj) { for (const auto &P : *PhdrsOrErr) { if (P.p_type != ELF::PT_NOTE) continue; - Error Err = Error::success(); - for (auto N : Obj.notes(P, Err)) - if (N.getType() == ELF::NT_GNU_BUILD_ID && - N.getName() == ELF::ELF_NOTE_GNU) - return N.getDesc(P.p_align); - consumeError(std::move(Err)); + if (std::optional<BuildIDRef> PhdrRes = findBuildID(P, P.p_align)) + return PhdrRes.value(); } return {}; } diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp index d14abb4..8623c06 100644 --- a/llvm/lib/Support/APFloat.cpp +++ b/llvm/lib/Support/APFloat.cpp @@ -5857,7 +5857,7 @@ DoubleAPFloat frexp(const DoubleAPFloat &Arg, int &Exp, // practice. 
if (Exp == APFloat::IEK_NaN) { DoubleAPFloat Quiet{Arg}; - Quiet.getFirst().makeQuiet(); + Quiet.getFirst() = Quiet.getFirst().makeQuiet(); return Quiet; } diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 79655e1..0f4bbfc3 100644 --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -1610,7 +1610,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, int BaseOffset = -AFI->getTaggedBasePointerOffset(); Register FrameReg; StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference( - MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg, + MF, BaseOffset, false /*isFixed*/, TargetStackID::Default /*StackID*/, + FrameReg, /*PreferFP=*/false, /*ForSimm=*/true); Register SrcReg = FrameReg; diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 20b0d69..4357264d 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -56,15 +56,20 @@ // | async context if needed | // | (a.k.a. "frame record") | // |-----------------------------------| <- fp(=x29) -// | <hazard padding> | -// |-----------------------------------| -// | | -// | callee-saved fp/simd/SVE regs | -// | | -// |-----------------------------------| -// | | -// | SVE stack objects | -// | | +// Default SVE stack layout Split SVE objects +// (aarch64-split-sve-objects=false) (aarch64-split-sve-objects=true) +// |-----------------------------------| |-----------------------------------| +// | <hazard padding> | | callee-saved PPR registers | +// |-----------------------------------| |-----------------------------------| +// | | | PPR stack objects | +// | callee-saved fp/simd/SVE regs | |-----------------------------------| +// | | | <hazard padding> | +// |-----------------------------------| |-----------------------------------| +// | | | callee-saved ZPR/FPR registers | +// | SVE stack objects | |-----------------------------------| +// | | | ZPR stack objects | +// |-----------------------------------| |-----------------------------------| +// ^ NB: FPR CSRs are promoted to ZPRs // |-----------------------------------| // |.empty.space.to.make.part.below....| // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at @@ -274,6 +279,11 @@ static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden); +static cl::opt<bool> + SplitSVEObjects("aarch64-split-sve-objects", + cl::desc("Split allocation of ZPR & PPR objects"), + cl::init(true), cl::Hidden); + cl::opt<bool> EnableHomogeneousPrologEpilog( "homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " @@ -324,6 +334,40 @@ AArch64FrameLowering::getArgumentStackToRestore(MachineFunction &MF, static bool produceCompactUnwindFrame(const AArch64FrameLowering &, MachineFunction &MF); +enum class AssignObjectOffsets { No, Yes }; +/// Process all the SVE stack objects and the SVE stack size and offsets for +/// each object. If AssignOffsets is "Yes", the offsets get assigned (and SVE +/// stack sizes set). Returns the size of the SVE stack. 
+static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, + AssignObjectOffsets AssignOffsets); + +static unsigned getStackHazardSize(const MachineFunction &MF) { + return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize(); +} + +/// Returns true if PPRs are spilled as ZPRs. +static bool arePPRsSpilledAsZPR(const MachineFunction &MF) { + return MF.getSubtarget().getRegisterInfo()->getSpillSize( + AArch64::PPRRegClass) == 16; +} + +StackOffset +AArch64FrameLowering::getZPRStackSize(const MachineFunction &MF) const { + const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); + return StackOffset::getScalable(AFI->getStackSizeZPR()); +} + +StackOffset +AArch64FrameLowering::getPPRStackSize(const MachineFunction &MF) const { + // With split SVE objects, the hazard padding is added to the PPR region, + // which places it between the [GPR, PPR] area and the [ZPR, FPR] area. This + // avoids hazards between both GPRs and FPRs and ZPRs and PPRs. + const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); + return StackOffset::get(AFI->hasSplitSVEObjects() ? getStackHazardSize(MF) + : 0, + AFI->getStackSizePPR()); +} + // Conservatively, returns true if the function is likely to have SVE vectors // on the stack. This function is safe to be called before callee-saves or // object offsets have been determined. @@ -338,7 +382,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, const MachineFrameInfo &MFI = MF.getFrameInfo(); for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) { - if (MFI.isScalableStackID(FI)) + if (MFI.hasScalableStackID(FI)) return true; } @@ -482,13 +526,6 @@ AArch64FrameLowering::getFixedObjectSize(const MachineFunction &MF, } } -/// Returns the size of the entire SVE stackframe (calleesaves + spills). -StackOffset -AArch64FrameLowering::getSVEStackSize(const MachineFunction &MF) const { - const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); - return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE()); -} - bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { if (!EnableRedZone) return false; @@ -514,7 +551,7 @@ bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const { !Subtarget.hasSVE(); return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize || - getSVEStackSize(MF) || LowerQRegCopyThroughMem); + AFI->hasSVEStackSize() || LowerQRegCopyThroughMem); } /// hasFPImpl - Return true if the specified function should have a dedicated @@ -557,7 +594,7 @@ bool AArch64FrameLowering::hasFPImpl(const MachineFunction &MF) const { // CFA in either of these cases. if (AFI.needsDwarfUnwindInfo(MF) && ((requiresSaveVG(MF) || AFI.getSMEFnAttrs().hasStreamingBody()) && - (!AFI.hasCalculatedStackSizeSVE() || AFI.getStackSizeSVE() > 0))) + (!AFI.hasCalculatedStackSizeSVE() || AFI.hasSVEStackSize()))) return true; // With large callframes around we may need to use FP to access the scavenging // emergency spillslot. 
@@ -1126,10 +1163,6 @@ static bool isTargetWindows(const MachineFunction &MF) { return MF.getSubtarget<AArch64Subtarget>().isTargetWindows(); } -static unsigned getStackHazardSize(const MachineFunction &MF) { - return MF.getSubtarget<AArch64Subtarget>().getStreamingHazardSize(); -} - void AArch64FrameLowering::emitPacRetPlusLeafHardening( MachineFunction &MF) const { const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); @@ -1212,7 +1245,9 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, const auto &MFI = MF.getFrameInfo(); int64_t ObjectOffset = MFI.getObjectOffset(FI); - StackOffset SVEStackSize = getSVEStackSize(MF); + StackOffset ZPRStackSize = getZPRStackSize(MF); + StackOffset PPRStackSize = getPPRStackSize(MF); + StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // For VLA-area objects, just emit an offset at the end of the stack frame. // Whilst not quite correct, these objects do live at the end of the frame and @@ -1228,11 +1263,21 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); bool FPAfterSVECalleeSaves = isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize(); - if (MFI.isScalableStackID(FI)) { + if (MFI.hasScalableStackID(FI)) { if (FPAfterSVECalleeSaves && - -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) + -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) { + assert(!AFI->hasSplitSVEObjects() && + "split-sve-objects not supported with FPAfterSVECalleeSaves"); return StackOffset::getScalable(ObjectOffset); - return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()), + } + StackOffset AccessOffset{}; + // The scalable vectors are below (lower address) the scalable predicates + // with split SVE objects, so we must subtract the size of the predicates. 
+ if (AFI->hasSplitSVEObjects() && + MFI.getStackID(FI) == TargetStackID::ScalableVector) + AccessOffset = -PPRStackSize; + return AccessOffset + + StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()), ObjectOffset); } @@ -1294,14 +1339,15 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference( const auto &MFI = MF.getFrameInfo(); int64_t ObjectOffset = MFI.getObjectOffset(FI); bool isFixed = MFI.isFixedObjectIndex(FI); - bool isSVE = MFI.isScalableStackID(FI); - return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg, - PreferFP, ForSimm); + auto StackID = static_cast<TargetStackID::Value>(MFI.getStackID(FI)); + return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, StackID, + FrameReg, PreferFP, ForSimm); } StackOffset AArch64FrameLowering::resolveFrameOffsetReference( - const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, - Register &FrameReg, bool PreferFP, bool ForSimm) const { + const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, + TargetStackID::Value StackID, Register &FrameReg, bool PreferFP, + bool ForSimm) const { const auto &MFI = MF.getFrameInfo(); const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( MF.getSubtarget().getRegisterInfo()); @@ -1312,8 +1358,11 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed(); bool isCSR = !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI)); + bool isSVE = MFI.isScalableStackID(StackID); - const StackOffset &SVEStackSize = getSVEStackSize(MF); + StackOffset ZPRStackSize = getZPRStackSize(MF); + StackOffset PPRStackSize = getPPRStackSize(MF); + StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // Use frame pointer to reference fixed objects. Use it for locals if // there are VLAs or a dynamically realigned SP (and thus the SP isn't @@ -1388,12 +1437,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize(); if (isSVE) { - StackOffset FPOffset = - StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); + StackOffset FPOffset = StackOffset::get( + -AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); StackOffset SPOffset = SVEStackSize + StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(), ObjectOffset); + + // With split SVE objects the ObjectOffset is relative to the split area + // (i.e. the PPR area or ZPR area respectively). + if (AFI->hasSplitSVEObjects() && StackID == TargetStackID::ScalableVector) { + // If we're accessing an SVE vector with split SVE objects... + // - From the FP we need to move down past the PPR area: + FPOffset -= PPRStackSize; + // - From the SP we only need to move up to the ZPR area: + SPOffset -= PPRStackSize; + // Note: `SPOffset = SVEStackSize + ...`, so `-= PPRStackSize` results in + // `SPOffset = ZPRStackSize + ...`. + } + if (FPAfterSVECalleeSaves) { FPOffset += StackOffset::getScalable(AFI->getSVECalleeSavedStackSize()); if (-ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize()) { @@ -1401,6 +1463,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( SPOffset += StackOffset::getFixed(AFI->getCalleeSavedStackSize()); } } + // Always use the FP for SVE spills if available and beneficial. 
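The fix-up above can be checked with plain arithmetic: under the split layout the ZPR area sits below the PPR area, so a ZPR object's FP-relative offset moves down by the PPR area size, while its SP-relative offset drops from being SVEStackSize-relative to ZPRStackSize-relative. A small self-contained sketch with assumed sizes (all values are scalable bytes; this is a model, not the LLVM code):

#include <cstdint>
#include <cstdio>

int main() {
  int64_t SVEStackSize = 48;  // ZPR area (32) + PPR area (16)
  int64_t PPRStackSize = 16;
  int64_t ObjectOffset = -8;  // offset of a ZPR object within the ZPR area

  // The FP sits above the whole SVE area; the SP sits below it.
  int64_t FPOffset = ObjectOffset;                // FP-relative, before fix-up
  int64_t SPOffset = SVEStackSize + ObjectOffset; // SP-relative, before fix-up

  // With split SVE objects, a ZPR object's offset is relative to the ZPR
  // sub-area: move the FP reference down past the PPR area, and note that
  // SPOffset = SVEStackSize + ... becomes SPOffset = ZPRStackSize + ...
  FPOffset -= PPRStackSize; // -24 scalable bytes from the FP
  SPOffset -= PPRStackSize; //  24 scalable bytes above the SP

  std::printf("FP%+lld, SP%+lld (scalable bytes)\n", (long long)FPOffset,
              (long long)SPOffset);
}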
if (hasFP(MF) && (SPOffset.getFixed() || FPOffset.getScalable() < SPOffset.getScalable() || @@ -1408,13 +1471,13 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( FrameReg = RegInfo->getFrameRegister(MF); return FPOffset; } - FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() : (unsigned)AArch64::SP; + return SPOffset; } - StackOffset ScalableOffset = {}; + StackOffset SVEAreaOffset = {}; if (FPAfterSVECalleeSaves) { // In this stack layout, the FP is in between the callee saves and other // SVE allocations. @@ -1422,25 +1485,25 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( StackOffset::getScalable(AFI->getSVECalleeSavedStackSize()); if (UseFP) { if (isFixed) - ScalableOffset = SVECalleeSavedStack; + SVEAreaOffset = SVECalleeSavedStack; else if (!isCSR) - ScalableOffset = SVECalleeSavedStack - SVEStackSize; + SVEAreaOffset = SVECalleeSavedStack - SVEStackSize; } else { if (isFixed) - ScalableOffset = SVEStackSize; + SVEAreaOffset = SVEStackSize; else if (isCSR) - ScalableOffset = SVEStackSize - SVECalleeSavedStack; + SVEAreaOffset = SVEStackSize - SVECalleeSavedStack; } } else { if (UseFP && !(isFixed || isCSR)) - ScalableOffset = -SVEStackSize; + SVEAreaOffset = -SVEStackSize; if (!UseFP && (isFixed || isCSR)) - ScalableOffset = SVEStackSize; + SVEAreaOffset = SVEStackSize; } if (UseFP) { FrameReg = RegInfo->getFrameRegister(MF); - return StackOffset::getFixed(FPOffset) + ScalableOffset; + return StackOffset::getFixed(FPOffset) + SVEAreaOffset; } // Use the base pointer if we have one. @@ -1457,7 +1520,7 @@ StackOffset AArch64FrameLowering::resolveFrameOffsetReference( Offset -= AFI->getLocalStackSize(); } - return StackOffset::getFixed(Offset) + ScalableOffset; + return StackOffset::getFixed(Offset) + SVEAreaOffset; } static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { @@ -1614,11 +1677,25 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, RegInc = -1; FirstReg = Count - 1; } + bool FPAfterSVECalleeSaves = IsWindows && AFI->getSVECalleeSavedStackSize(); - int ScalableByteOffset = - FPAfterSVECalleeSaves ? 0 : AFI->getSVECalleeSavedStackSize(); + + int ZPRByteOffset = 0; + int PPRByteOffset = 0; + bool SplitPPRs = AFI->hasSplitSVEObjects(); + if (SplitPPRs) { + ZPRByteOffset = AFI->getZPRCalleeSavedStackSize(); + PPRByteOffset = AFI->getPPRCalleeSavedStackSize(); + } else if (!FPAfterSVECalleeSaves) { + ZPRByteOffset = + AFI->getZPRCalleeSavedStackSize() + AFI->getPPRCalleeSavedStackSize(); + // Unused: Everything goes in ZPR space. + PPRByteOffset = 0; + } + bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace(); Register LastReg = 0; + bool HasCSHazardPadding = AFI->hasStackHazardSlotIndex() && !SplitPPRs; // When iterating backwards, the loop condition relies on unsigned wraparound. for (unsigned i = FirstReg; i < Count; i += RegInc) { @@ -1647,8 +1724,12 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, llvm_unreachable("Unsupported register class."); } + int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs + ? PPRByteOffset + : ZPRByteOffset; + // Add the stack hazard size as we transition from GPR->FPR CSRs. 
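One detail worth noting in the callee-save pair computation above: the running byte offset each save is charged to is chosen once by binding a reference, so the rest of the loop body is oblivious to whether PPRs are split out. A tiny standalone sketch of that pattern, with made-up spill sizes:

#include <cstdio>

enum class RegType { GPR, FPR, ZPR, PPR };

int main() {
  // With split PPRs each callee-save accumulates into its own region's
  // offset; otherwise everything lands in the ZPR offset. Illustrative only.
  bool SplitPPRs = true;
  int ZPRByteOffset = 0, PPRByteOffset = 0;
  RegType Saves[] = {RegType::PPR, RegType::ZPR, RegType::PPR, RegType::ZPR};

  for (RegType T : Saves) {
    // Select the accumulator by reference, as the patch does.
    int &Offset =
        (T == RegType::PPR && SplitPPRs) ? PPRByteOffset : ZPRByteOffset;
    Offset += (T == RegType::PPR) ? 2 : 16; // assumed spill sizes
  }
  std::printf("ZPR CS bytes: %d, PPR CS bytes: %d\n", ZPRByteOffset,
              PPRByteOffset);
}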
-  if (AFI->hasStackHazardSlotIndex() &&
+  if (HasCSHazardPadding &&
       (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) &&
       AArch64InstrInfo::isFpOrNEON(RPI.Reg1))
     ByteOffset += StackFillDir * StackHazardSize;
@@ -1656,7 +1737,7 @@ void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL,
   int Scale = TRI->getSpillSize(*RPI.RC);
   // Add the next reg to the pair if it is in the same register class.
-  if (unsigned(i + RegInc) < Count && !AFI->hasStackHazardSlotIndex()) {
+  if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) {
     MCRegister NextReg = CSI[i + RegInc].getReg();
     bool IsFirst = i == FirstReg;
     switch (RPI.Type) {
@@ -2203,6 +2284,13 @@ static std::optional<int> getLdStFrameID(const MachineInstr &MI,
   return getMMOFrameID(*MI.memoperands_begin(), MFI);
 }
 
+// Returns true if the LDST MachineInstr \p MI is a PPR access.
+static bool isPPRAccess(const MachineInstr &MI) {
+  return MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
+         MI.getOpcode() != AArch64::FILL_PPR_FROM_ZPR_SLOT_PSEUDO &&
+         AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
+}
+
 // Check if a Hazard slot is needed for the current function, and if so create
 // one for it. The index is stored in AArch64FunctionInfo->StackHazardSlotIndex,
 // which can be used to determine if any hazard padding is needed.
@@ -2226,25 +2314,50 @@ void AArch64FrameLowering::determineStackHazardSlot(
   bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
     return AArch64::FPR64RegClass.contains(Reg) ||
            AArch64::FPR128RegClass.contains(Reg) ||
-           AArch64::ZPRRegClass.contains(Reg) ||
-           AArch64::PPRRegClass.contains(Reg);
+           AArch64::ZPRRegClass.contains(Reg);
+  });
+  bool HasPPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
+    return AArch64::PPRRegClass.contains(Reg);
   });
   bool HasFPRStackObjects = false;
-  if (!HasFPRCSRs) {
-    std::vector<unsigned> FrameObjects(MFI.getObjectIndexEnd());
+  bool HasPPRStackObjects = false;
+  if (!HasFPRCSRs || SplitSVEObjects) {
+    enum SlotType : uint8_t {
+      Unknown = 0,
+      ZPRorFPR = 1 << 0,
+      PPR = 1 << 1,
+      GPR = 1 << 2,
+      LLVM_MARK_AS_BITMASK_ENUM(GPR)
+    };
+
+    // Find stack slots solely used for one kind of register (ZPR, PPR, etc.),
+    // based on the kinds of accesses used in the function.
+    SmallVector<SlotType> SlotTypes(MFI.getObjectIndexEnd(), SlotType::Unknown);
     for (auto &MBB : MF) {
       for (auto &MI : MBB) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
-        if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
-            FrameObjects[*FI] |= 2;
-          else
-            FrameObjects[*FI] |= 1;
+        if (!FI || *FI < 0 || *FI >= int(SlotTypes.size()))
+          continue;
+        if (MFI.hasScalableStackID(*FI)) {
+          SlotTypes[*FI] |=
+              isPPRAccess(MI) ? SlotType::PPR : SlotType::ZPRorFPR;
+        } else {
+          SlotTypes[*FI] |= AArch64InstrInfo::isFpOrNEON(MI)
+                                ? SlotType::ZPRorFPR
+                                : SlotType::GPR;
         }
       }
     }
-    HasFPRStackObjects =
-        any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; });
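The classification above is a simple bitmask accumulation: every access ORs its kind into the slot's type, so only a slot whose final type is exactly one kind counts as dedicated to that kind. A compact standalone sketch (slot indices and access lists are invented for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the SlotType bitmask idea from the patch, without the LLVM enum
// helper macro.
enum SlotType : uint8_t {
  Unknown = 0,
  ZPRorFPR = 1 << 0,
  PPR = 1 << 1,
  GPR = 1 << 2,
};

int main() {
  struct Access { int FI; SlotType Kind; };
  std::vector<Access> Accesses = {
      {0, PPR}, {0, PPR},       // slot 0: only predicate accesses
      {1, ZPRorFPR}, {1, GPR}}; // slot 1: mixed vector/GPR accesses
  uint8_t SlotTypes[2] = {Unknown, Unknown};

  // Accumulate the kind of every access into the slot's type.
  for (const Access &A : Accesses)
    SlotTypes[A.FI] |= A.Kind;

  // Only slots used solely by predicates qualify as PPR stack objects.
  for (int FI = 0; FI < 2; ++FI)
    std::printf("slot %d: %s\n", FI,
                SlotTypes[FI] == PPR ? "pure PPR slot" : "not a pure PPR slot");
}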
+
+    for (int FI = 0; FI < int(SlotTypes.size()); ++FI) {
+      HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR;
+      // For SplitSVEObjects, remember that this stack slot is a predicate;
+      // this will be needed later when determining the frame layout.
+      if (SlotTypes[FI] == SlotType::PPR) {
+        MFI.setStackID(FI, TargetStackID::ScalablePredicateVector);
+        HasPPRStackObjects = true;
+      }
+    }
   }
 
   if (HasFPRCSRs || HasFPRStackObjects) {
@@ -2253,6 +2366,78 @@ void AArch64FrameLowering::determineStackHazardSlot(
               << StackHazardSize << "\n");
     AFI->setStackHazardSlotIndex(ID);
   }
+
+  // Determine if we should use SplitSVEObjects. This should only be used if
+  // there's a possibility of a stack hazard between PPRs and ZPRs or FPRs.
+  if (SplitSVEObjects) {
+    if (!HasPPRCSRs && !HasPPRStackObjects) {
+      LLVM_DEBUG(
+          dbgs() << "Not using SplitSVEObjects as no PPRs are on the stack\n");
+      return;
+    }
+
+    if (!HasFPRCSRs && !HasFPRStackObjects) {
+      LLVM_DEBUG(
+          dbgs()
+          << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n");
+      return;
+    }
+
+    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+    if (MFI.hasVarSizedObjects() || TRI->hasStackRealignment(MF)) {
+      LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with variable "
+                           "sized objects or realignment\n");
+      return;
+    }
+
+    if (arePPRsSpilledAsZPR(MF)) {
+      LLVM_DEBUG(dbgs() << "SplitSVEObjects is not supported with "
                           "-aarch64-enable-zpr-predicate-spills\n");
+      return;
+    }
+
+    // If another calling convention is explicitly set, FPRs can't be promoted
+    // to ZPR callee-saves.
+    if (!is_contained({CallingConv::C, CallingConv::Fast,
+                       CallingConv::AArch64_SVE_VectorCall},
+                      MF.getFunction().getCallingConv())) {
+      LLVM_DEBUG(dbgs()
+                 << "Calling convention is not supported with SplitSVEObjects\n");
+      return;
+    }
+
+    [[maybe_unused]] const AArch64Subtarget &Subtarget =
+        MF.getSubtarget<AArch64Subtarget>();
+    assert(Subtarget.isSVEorStreamingSVEAvailable() &&
+           "Expected SVE to be available for PPRs");
+
+    // With SplitSVEObjects the CS hazard padding is placed between the PPRs
+    // and ZPRs. If there were any FPR CSRs, there would be a hazard between
+    // them and the GPR CSRs. Avoid this by promoting all FPR CSRs to ZPRs.
+    BitVector FPRZRegs(SavedRegs.size());
+    for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) {
+      BitVector::reference RegBit = SavedRegs[Reg];
+      if (!RegBit)
+        continue;
+      unsigned SubRegIdx = 0;
+      if (AArch64::FPR64RegClass.contains(Reg))
+        SubRegIdx = AArch64::dsub;
+      else if (AArch64::FPR128RegClass.contains(Reg))
+        SubRegIdx = AArch64::zsub;
+      else
+        continue;
+      // Clear the bit for the FPR save.
+      RegBit = false;
+      // Mark that we should save the corresponding ZPR.
+ Register ZReg = + TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass); + FPRZRegs.set(ZReg); + } + SavedRegs |= FPRZRegs; + + AFI->setSplitSVEObjects(true); + LLVM_DEBUG(dbgs() << "SplitSVEObjects enabled!\n"); + } } void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, @@ -2263,10 +2448,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, if (MF.getFunction().getCallingConv() == CallingConv::GHC) return; + const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>( MF.getSubtarget().getRegisterInfo()); - const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); unsigned UnspilledCSGPR = AArch64::NoRegister; unsigned UnspilledCSGPRPaired = AArch64::NoRegister; @@ -2385,17 +2571,26 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, SavedRegs.set(AArch64::X18); } + // Determine if a Hazard slot should be used and where it should go. + // If SplitSVEObjects is used, the hazard padding is placed between the PPRs + // and ZPRs. Otherwise, it goes in the callee save area. + determineStackHazardSlot(MF, SavedRegs); + // Calculates the callee saved stack size. unsigned CSStackSize = 0; - unsigned SVECSStackSize = 0; + unsigned ZPRCSStackSize = 0; + unsigned PPRCSStackSize = 0; const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); for (unsigned Reg : SavedRegs.set_bits()) { auto *RC = TRI->getMinimalPhysRegClass(Reg); assert(RC && "expected register class!"); auto SpillSize = TRI->getSpillSize(*RC); - if (AArch64::PPRRegClass.contains(Reg) || - AArch64::ZPRRegClass.contains(Reg)) - SVECSStackSize += SpillSize; + bool IsZPR = AArch64::ZPRRegClass.contains(Reg); + bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg); + if (IsZPR || (IsPPR && arePPRsSpilledAsZPR(MF))) + ZPRCSStackSize += SpillSize; + else if (IsPPR) + PPRCSStackSize += SpillSize; else CSStackSize += SpillSize; } @@ -2405,17 +2600,15 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // only 64-bit GPRs can be added to SavedRegs. unsigned NumSavedRegs = SavedRegs.count(); + // If we have hazard padding in the CS area add that to the size. + if (AFI->isStackHazardIncludedInCalleeSaveArea()) + CSStackSize += getStackHazardSize(MF); + // Increase the callee-saved stack size if the function has streaming mode // changes, as we will need to spill the value of the VG register. if (requiresSaveVG(MF)) CSStackSize += 8; - // Determine if a Hazard slot should be used, and increase the CSStackSize by - // StackHazardSize if so. - determineStackHazardSlot(MF, SavedRegs); - if (AFI->hasStackHazardSlotIndex()) - CSStackSize += getStackHazardSize(MF); - // If we must call __arm_get_current_vg in the prologue preserve the LR. if (requiresSaveVG(MF) && !Subtarget.hasSVE()) SavedRegs.set(AArch64::LR); @@ -2436,8 +2629,11 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, }); // If any callee-saved registers are used, the frame cannot be eliminated. 
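The promotion loop above rewrites each FPR64/FPR128 callee-save into the Z register that contains it as a sub-register (d8 and q8 both live in the low bits of z8), so that every FP-side save lands on the far side of the hazard padding. A toy model of that renaming (string-based and purely illustrative; the real code uses getMatchingSuperReg with the dsub/zsub indices):

#include <cstdio>
#include <string>

// Illustrative only: AArch64 register naming makes the promotion a pure
// rename of the register prefix, since dN/qN are sub-registers of zN.
std::string promoteToZPR(const std::string &Reg) {
  if (Reg[0] == 'd' || Reg[0] == 'q')
    return "z" + Reg.substr(1);
  return Reg; // not an FPR callee-save; left as-is
}

int main() {
  const char *Saves[] = {"x19", "d8", "q10", "p4"};
  for (const char *R : Saves)
    std::printf("%s -> %s\n", R, promoteToZPR(R).c_str());
}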
- int64_t SVEStackSize = - alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16); + auto [ZPRLocalStackSize, PPRLocalStackSize] = + determineSVEStackSizes(MF, AssignObjectOffsets::No); + uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize; + uint64_t SVEStackSize = + alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16); bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize; // The CSR spill slots have not been allocated yet, so estimateStackSize @@ -2522,7 +2718,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, // instructions. AFI->setCalleeSavedStackSize(AlignedCSStackSize); AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize); - AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16)); + AFI->setSVECalleeSavedStackSize(ZPRCSStackSize, alignTo(PPRCSStackSize, 16)); } bool AArch64FrameLowering::assignCalleeSavedSpillSlots( @@ -2575,7 +2771,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots( const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg); // Create a hazard slot as we switch between GPR and FPR CSRs. - if (AFI->hasStackHazardSlotIndex() && + if (AFI->isStackHazardIncludedInCalleeSaveArea() && (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) && AArch64InstrInfo::isFpOrNEON(Reg)) { assert(HazardSlotIndex == std::numeric_limits<int>::max() && @@ -2614,7 +2810,7 @@ bool AArch64FrameLowering::assignCalleeSavedSpillSlots( } // Add hazard slot in the case where no FPR CSRs are present. - if (AFI->hasStackHazardSlotIndex() && + if (AFI->isStackHazardIncludedInCalleeSaveArea() && HazardSlotIndex == std::numeric_limits<int>::max()) { HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true); LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex @@ -2661,7 +2857,6 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, assert((Max == std::numeric_limits<int>::min() || Max + 1 == CS.getFrameIdx()) && "SVE CalleeSaves are not consecutive"); - Min = std::min(Min, CS.getFrameIdx()); Max = std::max(Max, CS.getFrameIdx()); } @@ -2669,43 +2864,64 @@ static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, return Min != std::numeric_limits<int>::max(); } -// Process all the SVE stack objects and determine offsets for each -// object. If AssignOffsets is true, the offsets get assigned. -// Fills in the first and last callee-saved frame indices into -// Min/MaxCSFrameIndex, respectively. -// Returns the size of the stack. -static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, - int &MinCSFrameIndex, - int &MaxCSFrameIndex, - bool AssignOffsets) { +static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, + AssignObjectOffsets AssignOffsets) { + MachineFrameInfo &MFI = MF.getFrameInfo(); + auto *AFI = MF.getInfo<AArch64FunctionInfo>(); + + SVEStackSizes SVEStack{}; + + // With SplitSVEObjects we maintain separate stack offsets for predicates + // (PPRs) and SVE vectors (ZPRs). When SplitSVEObjects is disabled predicates + // are included in the SVE vector area. + uint64_t &ZPRStackTop = SVEStack.ZPRStackSize; + uint64_t &PPRStackTop = + AFI->hasSplitSVEObjects() ? SVEStack.PPRStackSize : SVEStack.ZPRStackSize; + #ifndef NDEBUG // First process all fixed stack objects. 
for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) - assert(!MFI.isScalableStackID(I) && + assert(!MFI.hasScalableStackID(I) && "SVE vectors should never be passed on the stack by value, only by " "reference."); #endif - auto Assign = [&MFI](int FI, int64_t Offset) { + auto AllocateObject = [&](int FI) { + uint64_t &StackTop = MFI.getStackID(FI) == TargetStackID::ScalableVector + ? ZPRStackTop + : PPRStackTop; + + // FIXME: Given that the length of SVE vectors is not necessarily a power of + // two, we'd need to align every object dynamically at runtime if the + // alignment is larger than 16. This is not yet supported. + Align Alignment = MFI.getObjectAlign(FI); + if (Alignment > Align(16)) + report_fatal_error( + "Alignment of scalable vectors > 16 bytes is not yet supported"); + + StackTop += MFI.getObjectSize(FI); + StackTop = alignTo(StackTop, Alignment); + + assert(StackTop < std::numeric_limits<int64_t>::max() && + "SVE StackTop far too large?!"); + + int64_t Offset = -int64_t(StackTop); + if (AssignOffsets == AssignObjectOffsets::Yes) + MFI.setObjectOffset(FI, Offset); + LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n"); - MFI.setObjectOffset(FI, Offset); }; - int64_t Offset = 0; - // Then process all callee saved slots. + int MinCSFrameIndex, MaxCSFrameIndex; if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) { - // Assign offsets to the callee save slots. - for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) { - Offset += MFI.getObjectSize(I); - Offset = alignTo(Offset, MFI.getObjectAlign(I)); - if (AssignOffsets) - Assign(I, -Offset); - } + for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI) + AllocateObject(FI); } - // Ensure that the Callee-save area is aligned to 16bytes. - Offset = alignTo(Offset, Align(16U)); + // Ensure the CS area is 16-byte aligned. + PPRStackTop = alignTo(PPRStackTop, Align(16U)); + ZPRStackTop = alignTo(ZPRStackTop, Align(16U)); // Create a buffer of SVE objects to allocate and sort it. SmallVector<int, 8> ObjectsToAllocate; @@ -2715,50 +2931,34 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, int StackProtectorFI = -1; if (MFI.hasStackProtectorIndex()) { StackProtectorFI = MFI.getStackProtectorIndex(); - if (MFI.isScalableStackID(StackProtectorFI)) + if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector) ObjectsToAllocate.push_back(StackProtectorFI); } - for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) { - if (!MFI.isScalableStackID(I)) - continue; - if (I == StackProtectorFI) + + for (int FI = 0, E = MFI.getObjectIndexEnd(); FI != E; ++FI) { + if (FI == StackProtectorFI || MFI.isDeadObjectIndex(FI)) continue; - if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex) + if (MaxCSFrameIndex >= FI && FI >= MinCSFrameIndex) continue; - if (MFI.isDeadObjectIndex(I)) + + if (MFI.getStackID(FI) != TargetStackID::ScalableVector && + MFI.getStackID(FI) != TargetStackID::ScalablePredicateVector) continue; - ObjectsToAllocate.push_back(I); + ObjectsToAllocate.push_back(FI); } // Allocate all SVE locals and spills - for (unsigned FI : ObjectsToAllocate) { - Align Alignment = MFI.getObjectAlign(FI); - // FIXME: Given that the length of SVE vectors is not necessarily a power of - // two, we'd need to align every object dynamically at runtime if the - // alignment is larger than 16. This is not yet supported. 
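AllocateObject above is a bump allocator for a downward-growing area: the relevant stack top (ZPR or PPR) grows by the object's size, is rounded up to the object's alignment (capped at 16 bytes, per the fatal error), and the object lives at the negated running total. A minimal standalone sketch with assumed object sizes:

#include <cstdint>
#include <cstdio>
#include <vector>

// Round Value up to a multiple of Align (Align is a power of two here).
int64_t alignTo(int64_t Value, int64_t Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  struct Object { int64_t Size, Align; };
  // Assumed sizes in scalable bytes: a predicate, a vector, a small spill.
  std::vector<Object> Objects = {{2, 2}, {16, 16}, {4, 2}};
  int64_t StackTop = 0;

  for (size_t FI = 0; FI < Objects.size(); ++FI) {
    // Grow the area, then align the new top so -StackTop is aligned.
    StackTop += Objects[FI].Size;
    StackTop = alignTo(StackTop, Objects[FI].Align);
    std::printf("alloc FI(%zu) at SP[%lld]\n", FI, (long long)-StackTop);
  }
  // The whole area is padded to 16 bytes, as in the patch.
  std::printf("area size: %lld\n", (long long)alignTo(StackTop, 16));
}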
- if (Alignment > Align(16)) - report_fatal_error( - "Alignment of scalable vectors > 16 bytes is not yet supported"); - - Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment); - if (AssignOffsets) - Assign(FI, -Offset); - } + for (unsigned FI : ObjectsToAllocate) + AllocateObject(FI); - return Offset; -} + PPRStackTop = alignTo(PPRStackTop, Align(16U)); + ZPRStackTop = alignTo(ZPRStackTop, Align(16U)); -int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets( - MachineFrameInfo &MFI) const { - int MinCSFrameIndex, MaxCSFrameIndex; - return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, false); -} + if (AssignOffsets == AssignObjectOffsets::Yes) + AFI->setStackSizeSVE(SVEStack.ZPRStackSize, SVEStack.PPRStackSize); -int64_t AArch64FrameLowering::assignSVEStackObjectOffsets( - MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const { - return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, - true); + return SVEStack; } /// Attempts to scavenge a register from \p ScavengeableRegs given the used @@ -3072,12 +3272,7 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized( assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown && "Upwards growing stack unsupported"); - int MinCSFrameIndex, MaxCSFrameIndex; - int64_t SVEStackSize = - assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex); - - AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U)); - AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex); + (void)determineSVEStackSizes(MF, AssignObjectOffsets::Yes); // If this function isn't doing Win64-style C++ EH, we don't need to do // anything. @@ -3361,7 +3556,8 @@ void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI, Register Reg; FrameRegOffset = TFI->resolveFrameOffsetReference( - *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg, + *MF, FirstTagStore.Offset, false /*isFixed*/, + TargetStackID::Default /*StackID*/, Reg, /*PreferFP=*/false, /*ForSimm=*/true); FrameReg = Reg; FrameRegUpdate = std::nullopt; @@ -3599,7 +3795,7 @@ StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP( // Go to common code if we cannot provide sp + offset. 
if (MFI.hasVarSizedObjects() || - MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() || + MF.getInfo<AArch64FunctionInfo>()->hasSVEStackSize() || MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF)) return getFrameIndexReference(MF, FI, FrameReg); @@ -3701,10 +3897,12 @@ bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) { void AArch64FrameLowering::orderFrameObjects( const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const { - if (!OrderFrameObjects || ObjectsToAllocate.empty()) + const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>(); + + if ((!OrderFrameObjects && !AFI.hasSplitSVEObjects()) || + ObjectsToAllocate.empty()) return; - const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>(); const MachineFrameInfo &MFI = MF.getFrameInfo(); std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd()); for (auto &Obj : ObjectsToAllocate) { @@ -3723,7 +3921,8 @@ void AArch64FrameLowering::orderFrameObjects( if (AFI.hasStackHazardSlotIndex()) { std::optional<int> FI = getLdStFrameID(MI, MFI); if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) { - if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI)) + if (MFI.getStackID(*FI) == TargetStackID::ScalableVector || + AArch64InstrInfo::isFpOrNEON(MI)) FrameObjects[*FI].Accesses |= FrameObject::AccessFPR; else FrameObjects[*FI].Accesses |= FrameObject::AccessGPR; @@ -4081,7 +4280,7 @@ void AArch64FrameLowering::emitRemarks( } unsigned RegTy = StackAccess::AccessType::GPR; - if (MFI.isScalableStackID(FrameIdx)) { + if (MFI.hasScalableStackID(FrameIdx)) { // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO // spill/fill the predicate as a data vector (so are an FPR access). if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO && diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h index 20d1d6a..32a9bd8 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h @@ -24,6 +24,11 @@ class AArch64FunctionInfo; class AArch64PrologueEmitter; class AArch64EpilogueEmitter; +struct SVEStackSizes { + uint64_t ZPRStackSize{0}; + uint64_t PPRStackSize{0}; +}; + class AArch64FrameLowering : public TargetFrameLowering { public: explicit AArch64FrameLowering() @@ -64,8 +69,9 @@ public: bool ForSimm) const; StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, - bool isSVE, Register &FrameReg, - bool PreferFP, bool ForSimm) const; + TargetStackID::Value StackID, + Register &FrameReg, bool PreferFP, + bool ForSimm) const; bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef<CalleeSavedInfo> CSI, @@ -147,7 +153,17 @@ public: bool requiresSaveVG(const MachineFunction &MF) const; - StackOffset getSVEStackSize(const MachineFunction &MF) const; + /// Returns the size of the entire ZPR stackframe (calleesaves + spills). + StackOffset getZPRStackSize(const MachineFunction &MF) const; + + /// Returns the size of the entire PPR stackframe (calleesaves + spills + + /// hazard padding). + StackOffset getPPRStackSize(const MachineFunction &MF) const; + + /// Returns the size of the entire SVE stackframe (PPRs + ZPRs). 
+ StackOffset getSVEStackSize(const MachineFunction &MF) const { + return getZPRStackSize(MF) + getPPRStackSize(MF); + } friend class AArch64PrologueEpilogueCommon; friend class AArch64PrologueEmitter; @@ -167,10 +183,6 @@ private: /// Returns true if CSRs should be paired. bool producePairRegisters(MachineFunction &MF) const; - int64_t estimateSVEStackObjectOffsets(MachineFrameInfo &MF) const; - int64_t assignSVEStackObjectOffsets(MachineFrameInfo &MF, - int &MinCSFrameIndex, - int &MaxCSFrameIndex) const; /// Make a determination whether a Hazard slot is used and create it if /// needed. void determineStackHazardSlot(MachineFunction &MF, diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 35bbb0c0..e7b2d20 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -7497,7 +7497,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, int FI = cast<FrameIndexSDNode>(N)->getIndex(); // We can only encode VL scaled offsets, so only fold in frame indexes // referencing SVE objects. - if (MFI.isScalableStackID(FI)) { + if (MFI.hasScalableStackID(FI)) { Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64); return true; @@ -7543,7 +7543,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N, int FI = cast<FrameIndexSDNode>(Base)->getIndex(); // We can only encode VL scaled offsets, so only fold in frame indexes // referencing SVE objects. - if (MFI.isScalableStackID(FI)) + if (MFI.hasScalableStackID(FI)) Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index c2a482a..70d5ad7d 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -9256,7 +9256,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, (MI.getOpcode() == AArch64::ADDXri || MI.getOpcode() == AArch64::SUBXri)) { const MachineOperand &MO = MI.getOperand(1); - if (MO.isFI() && MF.getFrameInfo().isScalableStackID(MO.getIndex())) + if (MO.isFI() && MF.getFrameInfo().hasScalableStackID(MO.getIndex())) MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false, /*IsImplicit=*/true)); } @@ -29608,7 +29608,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { // than doing it here in finalizeLowering. 
if (MFI.hasStackProtectorIndex()) { for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { - if (MFI.isScalableStackID(i) && + if (MFI.hasScalableStackID(i) && MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) { MFI.setStackID(MFI.getStackProtectorIndex(), TargetStackID::ScalableVector); diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp index a81f5b3..b3c9656 100644 --- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.cpp @@ -23,12 +23,21 @@ using namespace llvm; +static std::optional<uint64_t> +getSVEStackSize(const AArch64FunctionInfo &MFI, + uint64_t (AArch64FunctionInfo::*GetStackSize)() const) { + if (!MFI.hasCalculatedStackSizeSVE()) + return std::nullopt; + return (MFI.*GetStackSize)(); +} + yaml::AArch64FunctionInfo::AArch64FunctionInfo( const llvm::AArch64FunctionInfo &MFI) : HasRedZone(MFI.hasRedZone()), - StackSizeSVE(MFI.hasCalculatedStackSizeSVE() - ? std::optional<uint64_t>(MFI.getStackSizeSVE()) - : std::nullopt), + StackSizeZPR( + getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizeZPR)), + StackSizePPR( + getSVEStackSize(MFI, &llvm::AArch64FunctionInfo::getStackSizePPR)), HasStackFrame(MFI.hasStackFrame() ? std::optional<bool>(MFI.hasStackFrame()) : std::nullopt) {} @@ -41,8 +50,9 @@ void AArch64FunctionInfo::initializeBaseYamlFields( const yaml::AArch64FunctionInfo &YamlMFI) { if (YamlMFI.HasRedZone) HasRedZone = YamlMFI.HasRedZone; - if (YamlMFI.StackSizeSVE) - setStackSizeSVE(*YamlMFI.StackSizeSVE); + if (YamlMFI.StackSizeZPR || YamlMFI.StackSizePPR) + setStackSizeSVE(YamlMFI.StackSizeZPR.value_or(0), + YamlMFI.StackSizePPR.value_or(0)); if (YamlMFI.HasStackFrame) setHasStackFrame(*YamlMFI.HasStackFrame); } diff --git a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h index 897c7e8..91e64e6 100644 --- a/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h +++ b/llvm/lib/Target/AArch64/AArch64MachineFunctionInfo.h @@ -74,13 +74,10 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// Amount of stack frame size, not including callee-saved registers. uint64_t LocalStackSize = 0; - /// The start and end frame indices for the SVE callee saves. - int MinSVECSFrameIndex = 0; - int MaxSVECSFrameIndex = 0; - /// Amount of stack frame size used for saving callee-saved registers. unsigned CalleeSavedStackSize = 0; - unsigned SVECalleeSavedStackSize = 0; + unsigned ZPRCalleeSavedStackSize = 0; + unsigned PPRCalleeSavedStackSize = 0; bool HasCalleeSavedStackSize = false; bool HasSVECalleeSavedStackSize = false; @@ -137,9 +134,14 @@ class AArch64FunctionInfo final : public MachineFunctionInfo { /// SVE stack size (for predicates and data vectors) are maintained here /// rather than in FrameInfo, as the placement and Stack IDs are target /// specific. - uint64_t StackSizeSVE = 0; + uint64_t StackSizeZPR = 0; + uint64_t StackSizePPR = 0; + + /// Are SVE objects (vectors and predicates) split into separate regions on + /// the stack. + bool SplitSVEObjects = false; - /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid. + /// HasCalculatedStackSizeSVE indicates whether StackSizeZPR/PPR is valid. 
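The MIR serialization above now carries the two SVE stack sizes separately; on the read side, seeing either key marks the pair as calculated and defaults the missing one to zero. A self-contained sketch of that round-trip logic (the YAML layer is reduced to a struct of optionals; names mirror the patch but the types are simplified stand-ins):

#include <cstdint>
#include <cstdio>
#include <optional>

struct FunctionInfo {
  uint64_t StackSizeZPR = 0, StackSizePPR = 0;
  bool HasCalculatedStackSizeSVE = false;
  void setStackSizeSVE(uint64_t ZPR, uint64_t PPR) {
    StackSizeZPR = ZPR;
    StackSizePPR = PPR;
    HasCalculatedStackSizeSVE = true;
  }
};

// Stand-in for the yaml::AArch64FunctionInfo fields.
struct YamlFunctionInfo {
  std::optional<uint64_t> StackSizeZPR, StackSizePPR;
};

void initializeBaseYamlFields(FunctionInfo &MFI, const YamlFunctionInfo &Y) {
  // Either key being present means the sizes were calculated; the absent
  // one defaults to zero, as in the patch.
  if (Y.StackSizeZPR || Y.StackSizePPR)
    MFI.setStackSizeSVE(Y.StackSizeZPR.value_or(0),
                        Y.StackSizePPR.value_or(0));
}

int main() {
  FunctionInfo MFI;
  initializeBaseYamlFields(MFI, {std::nullopt, uint64_t{16}});
  std::printf("calculated=%d ZPR=%llu PPR=%llu\n",
              MFI.HasCalculatedStackSizeSVE,
              (unsigned long long)MFI.StackSizeZPR,
              (unsigned long long)MFI.StackSizePPR);
}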
bool HasCalculatedStackSizeSVE = false; /// Has a value when it is known whether or not the function uses a @@ -312,16 +314,25 @@ public: TailCallReservedStack = bytes; } - bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; } - - void setStackSizeSVE(uint64_t S) { + void setStackSizeSVE(uint64_t ZPR, uint64_t PPR) { + StackSizeZPR = ZPR; + StackSizePPR = PPR; HasCalculatedStackSizeSVE = true; - StackSizeSVE = S; } - uint64_t getStackSizeSVE() const { + uint64_t getStackSizeZPR() const { + assert(hasCalculatedStackSizeSVE()); + return StackSizeZPR; + } + uint64_t getStackSizePPR() const { assert(hasCalculatedStackSizeSVE()); - return StackSizeSVE; + return StackSizePPR; + } + + bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; } + + bool hasSVEStackSize() const { + return getStackSizeZPR() > 0 || getStackSizePPR() > 0; } bool hasStackFrame() const { return HasStackFrame; } @@ -329,7 +340,6 @@ public: bool isStackRealigned() const { return StackRealigned; } void setStackRealigned(bool s) { StackRealigned = s; } - bool hasCalleeSaveStackFreeSpace() const { return CalleeSaveStackHasFreeSpace; } @@ -414,29 +424,37 @@ public: } // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes' - void setSVECalleeSavedStackSize(unsigned Size) { - SVECalleeSavedStackSize = Size; + void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) { + ZPRCalleeSavedStackSize = ZPR; + PPRCalleeSavedStackSize = PPR; HasSVECalleeSavedStackSize = true; } - unsigned getSVECalleeSavedStackSize() const { + unsigned getZPRCalleeSavedStackSize() const { assert(HasSVECalleeSavedStackSize && - "SVECalleeSavedStackSize has not been calculated"); - return SVECalleeSavedStackSize; + "ZPRCalleeSavedStackSize has not been calculated"); + return ZPRCalleeSavedStackSize; } - - void setMinMaxSVECSFrameIndex(int Min, int Max) { - MinSVECSFrameIndex = Min; - MaxSVECSFrameIndex = Max; + unsigned getPPRCalleeSavedStackSize() const { + assert(HasSVECalleeSavedStackSize && + "PPRCalleeSavedStackSize has not been calculated"); + return PPRCalleeSavedStackSize; } - int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; } - int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; } + unsigned getSVECalleeSavedStackSize() const { + assert(!hasSplitSVEObjects() && + "ZPRs and PPRs are split. 
Use get[ZPR|PPR]CalleeSavedStackSize()"); + return getZPRCalleeSavedStackSize() + getPPRCalleeSavedStackSize(); + } void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; } unsigned getNumLocalDynamicTLSAccesses() const { return NumLocalDynamicTLSAccesses; } + bool isStackHazardIncludedInCalleeSaveArea() const { + return hasStackHazardSlotIndex() && !hasSplitSVEObjects(); + } + std::optional<bool> hasRedZone() const { return HasRedZone; } void setHasRedZone(bool s) { HasRedZone = s; } @@ -472,6 +490,15 @@ public: StackHazardCSRSlotIndex = Index; } + bool hasSplitSVEObjects() const { return SplitSVEObjects; } + void setSplitSVEObjects(bool s) { SplitSVEObjects = s; } + + bool hasSVE_AAPCS(const MachineFunction &MF) const { + return hasSplitSVEObjects() || isSVECC() || + MF.getFunction().getCallingConv() == + CallingConv::AArch64_SVE_VectorCall; + } + SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; } unsigned getSRetReturnReg() const { return SRetReturnReg; } @@ -611,7 +638,8 @@ private: namespace yaml { struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo { std::optional<bool> HasRedZone; - std::optional<uint64_t> StackSizeSVE; + std::optional<uint64_t> StackSizeZPR; + std::optional<uint64_t> StackSizePPR; std::optional<bool> HasStackFrame; AArch64FunctionInfo() = default; @@ -624,7 +652,8 @@ struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo { template <> struct MappingTraits<AArch64FunctionInfo> { static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) { YamlIO.mapOptional("hasRedZone", MFI.HasRedZone); - YamlIO.mapOptional("stackSizeSVE", MFI.StackSizeSVE); + YamlIO.mapOptional("stackSizeZPR", MFI.StackSizeZPR); + YamlIO.mapOptional("stackSizePPR", MFI.StackSizePPR); YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame); } }; diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp index 5da16b9..aed137c 100644 --- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp +++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp @@ -48,21 +48,19 @@ bool AArch64PrologueEpilogueCommon::isVGInstruction( return Opc == TargetOpcode::COPY; } -// Convenience function to determine whether I is an SVE callee save. -static bool isSVECalleeSave(MachineBasicBlock::iterator I) { +// Convenience function to determine whether I is part of the ZPR callee saves. +static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) { switch (I->getOpcode()) { default: return false; - case AArch64::PTRUE_C_B: case AArch64::LD1B_2Z_IMM: case AArch64::ST1B_2Z_IMM: case AArch64::STR_ZXI: - case AArch64::STR_PXI: case AArch64::LDR_ZXI: - case AArch64::LDR_PXI: - case AArch64::PTRUE_B: case AArch64::CPY_ZPzI_B: case AArch64::CMPNE_PPzZI_B: + case AArch64::PTRUE_C_B: + case AArch64::PTRUE_B: return I->getFlag(MachineInstr::FrameSetup) || I->getFlag(MachineInstr::FrameDestroy); case AArch64::SEH_SavePReg: @@ -71,6 +69,23 @@ static bool isSVECalleeSave(MachineBasicBlock::iterator I) { } } +// Convenience function to determine whether I is part of the PPR callee saves. +static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) { + switch (I->getOpcode()) { + default: + return false; + case AArch64::STR_PXI: + case AArch64::LDR_PXI: + return I->getFlag(MachineInstr::FrameSetup) || + I->getFlag(MachineInstr::FrameDestroy); + } +} + +// Convenience function to determine whether I is part of the SVE callee saves. 
+static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) { + return isPartOfZPRCalleeSaves(I) || isPartOfPPRCalleeSaves(I); +} + AArch64PrologueEpilogueCommon::AArch64PrologueEpilogueCommon( MachineFunction &MF, MachineBasicBlock &MBB, const AArch64FrameLowering &AFL) @@ -316,7 +331,7 @@ bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump( // When there is an SVE area on the stack, always allocate the // callee-saves and spills/locals separately. - if (AFL.getSVEStackSize(MF)) + if (AFI->hasSVEStackSize()) return false; return true; @@ -639,7 +654,7 @@ void AArch64PrologueEmitter::emitPrologue() { // Now allocate space for the GPR callee saves. MachineBasicBlock::iterator MBBI = PrologueBeginI; - while (MBBI != EndI && isSVECalleeSave(MBBI)) + while (MBBI != EndI && isPartOfSVECalleeSaves(MBBI)) ++MBBI; FirstGPRSaveI = convertCalleeSaveRestoreToSPPrePostIncDec( MBBI, DL, -AFI->getCalleeSavedStackSize(), EmitAsyncCFI); @@ -669,7 +684,7 @@ void AArch64PrologueEmitter::emitPrologue() { MachineBasicBlock::iterator AfterGPRSavesI = FirstGPRSaveI; while (AfterGPRSavesI != EndI && AfterGPRSavesI->getFlag(MachineInstr::FrameSetup) && - !isSVECalleeSave(AfterGPRSavesI)) { + !isPartOfSVECalleeSaves(AfterGPRSavesI)) { if (CombineSPBump && // Only fix-up frame-setup load/store instructions. (!AFL.requiresSaveVG(MF) || !isVGInstruction(AfterGPRSavesI, TLI))) @@ -700,56 +715,105 @@ void AArch64PrologueEmitter::emitPrologue() { if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding); - StackOffset SVEStackSize = AFL.getSVEStackSize(MF); - StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize; - MachineBasicBlock::iterator CalleeSavesEnd = AfterGPRSavesI; + StackOffset PPRCalleeSavesSize = + StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize()); + StackOffset ZPRCalleeSavesSize = + StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize()); + StackOffset SVECalleeSavesSize = PPRCalleeSavesSize + ZPRCalleeSavesSize; + StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize; + StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize; + + std::optional<MachineBasicBlock::iterator> ZPRCalleeSavesBegin, + ZPRCalleeSavesEnd, PPRCalleeSavesBegin, PPRCalleeSavesEnd; StackOffset CFAOffset = StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes); - - // Process the SVE callee-saves to determine what space needs to be - // allocated. MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI; - if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { - LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize - << "\n"); - SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize); - SVELocalsSize = SVEStackSize - SVECalleeSavesSize; - // Find callee save instructions in frame. - // Note: With FPAfterSVECalleeSaves the callee saves have already been - // allocated. - if (!FPAfterSVECalleeSaves) { - MachineBasicBlock::iterator CalleeSavesBegin = AfterGPRSavesI; - assert(isSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction"); - while (isSVECalleeSave(AfterSVESavesI) && + if (!FPAfterSVECalleeSaves) { + // Process the SVE callee-saves to find the starts/ends of the ZPR and PPR + // areas. 
+ PPRCalleeSavesBegin = AfterGPRSavesI; + if (PPRCalleeSavesSize) { + LLVM_DEBUG(dbgs() << "PPRCalleeSavedStackSize = " + << PPRCalleeSavesSize.getScalable() << "\n"); + + assert(isPartOfPPRCalleeSaves(*PPRCalleeSavesBegin) && + "Unexpected instruction"); + while (isPartOfPPRCalleeSaves(AfterSVESavesI) && + AfterSVESavesI != MBB.getFirstTerminator()) + ++AfterSVESavesI; + } + PPRCalleeSavesEnd = ZPRCalleeSavesBegin = AfterSVESavesI; + if (ZPRCalleeSavesSize) { + LLVM_DEBUG(dbgs() << "ZPRCalleeSavedStackSize = " + << ZPRCalleeSavesSize.getScalable() << "\n"); + assert(isPartOfZPRCalleeSaves(*ZPRCalleeSavesBegin) && + "Unexpected instruction"); + while (isPartOfZPRCalleeSaves(AfterSVESavesI) && AfterSVESavesI != MBB.getFirstTerminator()) ++AfterSVESavesI; - CalleeSavesEnd = AfterSVESavesI; - - StackOffset LocalsSize = SVELocalsSize + StackOffset::getFixed(NumBytes); - // Allocate space for the callee saves (if any). - allocateStackSpace(CalleeSavesBegin, 0, SVECalleeSavesSize, - EmitAsyncCFI && !HasFP, CFAOffset, - MFI.hasVarSizedObjects() || LocalsSize); } + ZPRCalleeSavesEnd = AfterSVESavesI; } - CFAOffset += SVECalleeSavesSize; if (EmitAsyncCFI) - emitCalleeSavedSVELocations(CalleeSavesEnd); - - // Allocate space for the rest of the frame including SVE locals. Align the - // stack as necessary. - assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) && - "Cannot use redzone with stack realignment"); - if (!AFL.canUseRedZone(MF)) { - // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have - // the correct value here, as NumBytes also includes padding bytes, - // which shouldn't be counted here. - allocateStackSpace(CalleeSavesEnd, RealignmentPadding, - SVELocalsSize + StackOffset::getFixed(NumBytes), + emitCalleeSavedSVELocations(AfterSVESavesI); + + if (AFI->hasSplitSVEObjects()) { + assert(!FPAfterSVECalleeSaves && + "Cannot use FPAfterSVECalleeSaves with aarch64-split-sve-objects"); + assert(!AFL.canUseRedZone(MF) && + "Cannot use redzone with aarch64-split-sve-objects"); + // TODO: Handle HasWinCFI/NeedsWinCFI? + assert(!NeedsWinCFI && + "WinCFI with aarch64-split-sve-objects is not supported"); + + // Split ZPR and PPR allocation. + // Allocate PPR callee saves + allocateStackSpace(*PPRCalleeSavesBegin, 0, PPRCalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || ZPRCalleeSavesSize || + ZPRLocalsSize || PPRLocalsSize); + CFAOffset += PPRCalleeSavesSize; + + // Allocate PPR locals + ZPR callee saves + assert(PPRCalleeSavesEnd == ZPRCalleeSavesBegin && + "Expected ZPR callee saves after PPR locals"); + allocateStackSpace(*PPRCalleeSavesEnd, RealignmentPadding, + PPRLocalsSize + ZPRCalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || ZPRLocalsSize); + CFAOffset += PPRLocalsSize + ZPRCalleeSavesSize; + + // Allocate ZPR locals + allocateStackSpace(*ZPRCalleeSavesEnd, RealignmentPadding, + ZPRLocalsSize + StackOffset::getFixed(NumBytes), EmitAsyncCFI && !HasFP, CFAOffset, MFI.hasVarSizedObjects()); + } else { + // Allocate space for the callee saves (if any). + StackOffset LocalsSize = + PPRLocalsSize + ZPRLocalsSize + StackOffset::getFixed(NumBytes); + if (!FPAfterSVECalleeSaves) + allocateStackSpace(AfterGPRSavesI, 0, SVECalleeSavesSize, + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects() || LocalsSize); + CFAOffset += SVECalleeSavesSize; + + // Allocate space for the rest of the frame including SVE locals. Align the + // stack as necessary. 
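Under the split layout the prologue above performs three separate allocations, and the CFA offset must be advanced after each one so that asynchronous unwind info stays correct at every intermediate SP. A simplified standalone sketch of that bookkeeping (sizes are invented; fixed and scalable bytes are collapsed into plain integers for brevity, and the PPR locals figure includes the hazard padding charged to the PPR area):

#include <cstdio>

int main() {
  // Assumed sizes for illustration only.
  int PPRCalleeSaves = 4, PPRLocals = 12, ZPRCalleeSaves = 32, ZPRLocals = 16;
  int NumBytes = 64; // non-SVE locals

  // The three allocations, in the order the split-SVE prologue emits them.
  struct Step { const char *What; int Size; } Steps[] = {
      {"PPR callee-saves", PPRCalleeSaves},
      {"PPR locals + ZPR callee-saves", PPRLocals + ZPRCalleeSaves},
      {"ZPR locals + non-SVE locals", ZPRLocals + NumBytes},
  };

  int CFAOffset = 0;
  for (const Step &S : Steps) {
    CFAOffset += S.Size; // advance the CFA past each allocation
    std::printf("allocate %-30s -> CFA offset %d\n", S.What, CFAOffset);
  }
}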
+ assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) && + "Cannot use redzone with stack realignment"); + if (!AFL.canUseRedZone(MF)) { + // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have + // the correct value here, as NumBytes also includes padding bytes, + // which shouldn't be counted here. + StackOffset SVELocalsSize = PPRLocalsSize + ZPRLocalsSize; + allocateStackSpace(AfterSVESavesI, RealignmentPadding, + SVELocalsSize + StackOffset::getFixed(NumBytes), + EmitAsyncCFI && !HasFP, CFAOffset, + MFI.hasVarSizedObjects()); + } } // If we need a base pointer, set it up here. It's whatever the value of the @@ -796,7 +860,8 @@ void AArch64PrologueEmitter::emitPrologue() { emitDefineCFAWithFP(AfterSVESavesI, FixedObject); } else { StackOffset TotalSize = - SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()); + AFL.getSVEStackSize(MF) + + StackOffset::getFixed((int64_t)MFI.getStackSize()); CFIInstBuilder CFIBuilder(MBB, AfterSVESavesI, MachineInstr::FrameSetup); CFIBuilder.insertCFIInst( createDefCFA(RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP, @@ -1165,7 +1230,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations( CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup); for (const auto &Info : CSI) { unsigned FrameIdx = Info.getFrameIdx(); - if (MFI.isScalableStackID(FrameIdx)) + if (MFI.hasScalableStackID(FrameIdx)) continue; assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); @@ -1191,8 +1256,10 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations( AFL.getOffsetOfLocalArea(); } + StackOffset PPRStackSize = AFL.getPPRStackSize(MF); for (const auto &Info : CSI) { - if (!MFI.isScalableStackID(Info.getFrameIdx())) + int FI = Info.getFrameIdx(); + if (!MFI.hasScalableStackID(FI)) continue; // Not all unwinders may know about SVE registers, so assume the lowest @@ -1203,9 +1270,13 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations( continue; StackOffset Offset = - StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - + StackOffset::getScalable(MFI.getObjectOffset(FI)) - StackOffset::getFixed(AFI->getCalleeSavedStackSize(MFI)); + if (AFI->hasSplitSVEObjects() && + MFI.getStackID(FI) == TargetStackID::ScalableVector) + Offset -= PPRStackSize; + CFIBuilder.insertCFIInst( createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA)); } @@ -1322,7 +1393,7 @@ void AArch64EpilogueEmitter::emitEpilogue() { while (FirstGPRRestoreI != Begin) { --FirstGPRRestoreI; if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) || - (!FPAfterSVECalleeSaves && isSVECalleeSave(FirstGPRRestoreI))) { + (!FPAfterSVECalleeSaves && isPartOfSVECalleeSaves(FirstGPRRestoreI))) { ++FirstGPRRestoreI; break; } else if (CombineSPBump) @@ -1346,7 +1417,9 @@ void AArch64EpilogueEmitter::emitEpilogue() { if (HasFP && AFI->hasSwiftAsyncContext()) emitSwiftAsyncContextFramePointer(EpilogueEndI, DL); - const StackOffset &SVEStackSize = AFL.getSVEStackSize(MF); + StackOffset ZPRStackSize = AFL.getZPRStackSize(MF); + StackOffset PPRStackSize = AFL.getPPRStackSize(MF); + StackOffset SVEStackSize = ZPRStackSize + PPRStackSize; // If there is a single SP update, insert it before the ret and we're done. if (CombineSPBump) { @@ -1367,106 +1440,188 @@ void AArch64EpilogueEmitter::emitEpilogue() { NumBytes -= PrologueSaveSize; assert(NumBytes >= 0 && "Negative stack allocation size!?"); - // Process the SVE callee-saves to determine what space needs to be - // deallocated. 
- StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize; - MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI, - RestoreEnd = FirstGPRRestoreI; - if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { - if (FPAfterSVECalleeSaves) - RestoreEnd = MBB.getFirstTerminator(); - - RestoreBegin = std::prev(RestoreEnd); - while (RestoreBegin != MBB.begin() && - isSVECalleeSave(std::prev(RestoreBegin))) - --RestoreBegin; - - assert(isSVECalleeSave(RestoreBegin) && - isSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction"); - - StackOffset CalleeSavedSizeAsOffset = - StackOffset::getScalable(CalleeSavedSize); - DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset; - DeallocateAfter = CalleeSavedSizeAsOffset; - } - - // Deallocate the SVE area. - if (FPAfterSVECalleeSaves) { - // If the callee-save area is before FP, restoring the FP implicitly - // deallocates non-callee-save SVE allocations. Otherwise, deallocate - // them explicitly. - if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) { - emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP, - DeallocateBefore, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI); + if (!AFI->hasSplitSVEObjects()) { + // Process the SVE callee-saves to determine what space needs to be + // deallocated. + StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize; + MachineBasicBlock::iterator RestoreBegin = FirstGPRRestoreI, + RestoreEnd = FirstGPRRestoreI; + int64_t ZPRCalleeSavedSize = AFI->getZPRCalleeSavedStackSize(); + int64_t PPRCalleeSavedSize = AFI->getPPRCalleeSavedStackSize(); + int64_t SVECalleeSavedSize = ZPRCalleeSavedSize + PPRCalleeSavedSize; + + if (SVECalleeSavedSize) { + if (FPAfterSVECalleeSaves) + RestoreEnd = MBB.getFirstTerminator(); + + RestoreBegin = std::prev(RestoreEnd); + while (RestoreBegin != MBB.begin() && + isPartOfSVECalleeSaves(std::prev(RestoreBegin))) + --RestoreBegin; + + assert(isPartOfSVECalleeSaves(RestoreBegin) && + isPartOfSVECalleeSaves(std::prev(RestoreEnd)) && + "Unexpected instruction"); + + StackOffset CalleeSavedSizeAsOffset = + StackOffset::getScalable(SVECalleeSavedSize); + DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset; + DeallocateAfter = CalleeSavedSizeAsOffset; } - // Deallocate callee-save non-SVE registers. - emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(AFI->getCalleeSavedStackSize()), TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); - - // Deallocate fixed objects. - emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(FixedObject), TII, - MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); - - // Deallocate callee-save SVE registers. - emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, - DeallocateAfter, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI); - } else if (SVEStackSize) { - int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize(); - // If we have stack realignment or variable-sized objects we must use the - // FP to restore SVE callee saves (as there is an unknown amount of - // data/padding between the SP and SVE CS area). - Register BaseForSVEDealloc = - (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? 
AArch64::FP - : AArch64::SP; - if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) { - Register CalleeSaveBase = AArch64::FP; - if (int64_t CalleeSaveBaseOffset = - AFI->getCalleeSaveBaseToFrameRecordOffset()) { - // If we have have an non-zero offset to the non-SVE CS base we need to - // compute the base address by subtracting the offest in a temporary - // register first (to avoid briefly deallocating the SVE CS). - CalleeSaveBase = - MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass); - emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP, - StackOffset::getFixed(-CalleeSaveBaseOffset), TII, - MachineInstr::FrameDestroy); - } - // The code below will deallocate the stack space space by moving the - // SP to the start of the SVE callee-save area. - emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase, - StackOffset::getScalable(-SVECalleeSavedSize), TII, - MachineInstr::FrameDestroy); - } else if (BaseForSVEDealloc == AArch64::SP) { - if (SVECalleeSavedSize) { - // Deallocate the non-SVE locals first before we can deallocate (and - // restore callee saves) from the SVE area. - emitFrameOffset( - MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy, - false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize)); - NumBytes = 0; + // Deallocate the SVE area. + if (FPAfterSVECalleeSaves) { + // If the callee-save area is before FP, restoring the FP implicitly + // deallocates non-callee-save SVE allocations. Otherwise, deallocate + // them explicitly. + if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) { + emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP, + DeallocateBefore, TII, MachineInstr::FrameDestroy, + false, NeedsWinCFI, &HasWinCFI); } + // Deallocate callee-save non-SVE registers. emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, - DeallocateBefore, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - SVEStackSize + - StackOffset::getFixed(NumBytes + PrologueSaveSize)); + StackOffset::getFixed(AFI->getCalleeSavedStackSize()), + TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI); + + // Deallocate fixed objects. + emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, + StackOffset::getFixed(FixedObject), TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI); + // Deallocate callee-save SVE registers. emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, DeallocateAfter, TII, MachineInstr::FrameDestroy, false, - NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, - DeallocateAfter + - StackOffset::getFixed(NumBytes + PrologueSaveSize)); + NeedsWinCFI, &HasWinCFI); + } else if (SVEStackSize) { + int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize(); + // If we have stack realignment or variable-sized objects we must use the + // FP to restore SVE callee saves (as there is an unknown amount of + // data/padding between the SP and SVE CS area). + Register BaseForSVEDealloc = + (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? 
AArch64::FP + : AArch64::SP; + if (SVECalleeSavedSize && BaseForSVEDealloc == AArch64::FP) { + Register CalleeSaveBase = AArch64::FP; + if (int64_t CalleeSaveBaseOffset = + AFI->getCalleeSaveBaseToFrameRecordOffset()) { + // If we have a non-zero offset to the non-SVE CS base we need + // to compute the base address by subtracting the offset in a + // temporary register first (to avoid briefly deallocating the SVE + // CS). + CalleeSaveBase = MBB.getParent()->getRegInfo().createVirtualRegister( + &AArch64::GPR64RegClass); + emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP, + StackOffset::getFixed(-CalleeSaveBaseOffset), TII, + MachineInstr::FrameDestroy); + } + // The code below will deallocate the stack space by moving the + // SP to the start of the SVE callee-save area. + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase, + StackOffset::getScalable(-SVECalleeSavedSize), TII, + MachineInstr::FrameDestroy); + } else if (BaseForSVEDealloc == AArch64::SP) { + if (SVECalleeSavedSize) { + // Deallocate the non-SVE locals first before we can deallocate (and + // restore callee saves) from the SVE area. + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, + StackOffset::getFixed(NumBytes), TII, + MachineInstr::FrameDestroy, false, NeedsWinCFI, + &HasWinCFI, EmitCFI && !HasFP, + SVEStackSize + StackOffset::getFixed( + NumBytes + PrologueSaveSize)); + NumBytes = 0; + } + + emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP, + DeallocateBefore, TII, MachineInstr::FrameDestroy, + false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, + SVEStackSize + + StackOffset::getFixed(NumBytes + PrologueSaveSize)); + + emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP, + DeallocateAfter, TII, MachineInstr::FrameDestroy, false, + NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP, + DeallocateAfter + + StackOffset::getFixed(NumBytes + PrologueSaveSize)); + } + + if (EmitCFI) + emitCalleeSavedSVERestores(RestoreEnd); + } + } else if (AFI->hasSplitSVEObjects() && SVEStackSize) { + // TODO: Support stack realignment and variable-sized objects. + assert(!AFI->isStackRealigned() && !MFI.hasVarSizedObjects() && + "unexpected stack realignment or variable sized objects with split " + "SVE stack objects"); + // SplitSVEObjects. Determine the sizes and starts/ends of the ZPR and PPR + // areas. + auto ZPRCalleeSavedSize = + StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize()); + auto PPRCalleeSavedSize = + StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize()); + StackOffset PPRLocalsSize = PPRStackSize - PPRCalleeSavedSize; + StackOffset ZPRLocalsSize = ZPRStackSize - ZPRCalleeSavedSize; + + MachineBasicBlock::iterator PPRRestoreBegin = FirstGPRRestoreI, + PPRRestoreEnd = FirstGPRRestoreI; + if (PPRCalleeSavedSize) { + PPRRestoreBegin = std::prev(PPRRestoreEnd); + while (PPRRestoreBegin != MBB.begin() && + isPartOfPPRCalleeSaves(std::prev(PPRRestoreBegin))) + --PPRRestoreBegin; + } + + MachineBasicBlock::iterator ZPRRestoreBegin = PPRRestoreBegin, + ZPRRestoreEnd = PPRRestoreBegin; + if (ZPRCalleeSavedSize) { + ZPRRestoreBegin = std::prev(ZPRRestoreEnd); + while (ZPRRestoreBegin != MBB.begin() && + isPartOfZPRCalleeSaves(std::prev(ZPRRestoreBegin))) + --ZPRRestoreBegin; } + + auto CFAOffset = + SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize); + if (PPRCalleeSavedSize || ZPRCalleeSavedSize) { + // Deallocate the non-SVE locals first before we can deallocate (and + // restore callee saves) from the SVE area.
+ auto NonSVELocals = StackOffset::getFixed(NumBytes); + emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + NonSVELocals, TII, MachineInstr::FrameDestroy, false, + false, nullptr, EmitCFI && !HasFP, CFAOffset); + NumBytes = 0; + CFAOffset -= NonSVELocals; + } + + if (ZPRLocalsSize) { + emitFrameOffset(MBB, ZPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + ZPRLocalsSize, TII, MachineInstr::FrameDestroy, false, + false, nullptr, EmitCFI && !HasFP, CFAOffset); + CFAOffset -= ZPRLocalsSize; + } + + if (PPRLocalsSize || ZPRCalleeSavedSize) { + assert(PPRRestoreBegin == ZPRRestoreEnd && + "Expected PPR restores after ZPR"); + emitFrameOffset(MBB, PPRRestoreBegin, DL, AArch64::SP, AArch64::SP, + PPRLocalsSize + ZPRCalleeSavedSize, TII, + MachineInstr::FrameDestroy, false, false, nullptr, + EmitCFI && !HasFP, CFAOffset); + CFAOffset -= PPRLocalsSize + ZPRCalleeSavedSize; + } + if (PPRCalleeSavedSize) { + emitFrameOffset(MBB, PPRRestoreEnd, DL, AArch64::SP, AArch64::SP, + PPRCalleeSavedSize, TII, MachineInstr::FrameDestroy, + false, false, nullptr, EmitCFI && !HasFP, CFAOffset); + } + + // We only emit CFI information for ZPRs so emit CFI after the ZPR restores. if (EmitCFI) - emitCalleeSavedSVERestores(RestoreEnd); + emitCalleeSavedSVERestores(ZPRRestoreEnd); } if (!HasFP) { @@ -1624,7 +1779,7 @@ void AArch64EpilogueEmitter::emitCalleeSavedRestores( CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy); for (const auto &Info : CSI) { - if (SVE != MFI.isScalableStackID(Info.getFrameIdx())) + if (SVE != MFI.hasScalableStackID(Info.getFrameIdx())) continue; MCRegister Reg = Info.getReg(); diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp index 2b0c8ad..79975b0 100644 --- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -71,6 +71,7 @@ bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg, const MCPhysReg * AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); + auto &AFI = *MF->getInfo<AArch64FunctionInfo>(); if (MF->getFunction().getCallingConv() == CallingConv::GHC) // GHC set of callee saved regs is empty as all those regs are @@ -101,10 +102,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList; if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall) return CSR_Win_AArch64_AAVPCS_SaveList; - if (MF->getFunction().getCallingConv() == - CallingConv::AArch64_SVE_VectorCall) - return CSR_Win_AArch64_SVE_AAPCS_SaveList; - if (MF->getInfo<AArch64FunctionInfo>()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_Win_AArch64_SVE_AAPCS_SaveList; return CSR_Win_AArch64_AAPCS_SaveList; } @@ -148,7 +146,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { // This is for OSes other than Windows; Windows is a separate case further // above. 
return CSR_AArch64_AAPCS_X18_SaveList; - if (MF->getInfo<AArch64FunctionInfo>()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_AArch64_SVE_AAPCS_SaveList; return CSR_AArch64_AAPCS_SaveList; } @@ -158,6 +156,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const { assert(MF && "Invalid MachineFunction pointer."); assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() && "Invalid subtarget for getDarwinCalleeSavedRegs"); + auto &AFI = *MF->getInfo<AArch64FunctionInfo>(); if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check) report_fatal_error( @@ -205,7 +204,7 @@ AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const { return CSR_Darwin_AArch64_RT_AllRegs_SaveList; if (MF->getFunction().getCallingConv() == CallingConv::Win64) return CSR_Darwin_AArch64_AAPCS_Win64_SaveList; - if (MF->getInfo<AArch64FunctionInfo>()->isSVECC()) + if (AFI.hasSVE_AAPCS(*MF)) return CSR_Darwin_AArch64_SVE_AAPCS_SaveList; return CSR_Darwin_AArch64_AAPCS_SaveList; } @@ -643,7 +642,7 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const { if (ST.hasSVE() || ST.isStreaming()) { // Frames that have variable sized objects and scalable SVE objects, // should always use a basepointer. - if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE()) + if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize()) return true; } @@ -783,7 +782,7 @@ AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() || AFI->hasCalculatedStackSizeSVE()) && "Expected SVE area to be calculated by this point"); - return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE() && + return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->hasSVEStackSize() && !AFI->hasStackHazardSlotIndex(); } diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td index 36c9cb6..bc6b931 100644 --- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -1010,6 +1010,36 @@ let Predicates = [HasSVE_or_SME] in { defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>; defm SPLICE_ZPZ : sve_int_perm_splice<"splice", AArch64splice>; + + // mul x (splat -1) -> neg x + def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))), + (NEG_ZPmZ_B $Op2, $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))), + (NEG_ZPmZ_H $Op2, $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))), + (NEG_ZPmZ_S $Op2, $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))), + (NEG_ZPmZ_D $Op2, $Op1, $Op2)>; + + let AddedComplexity = 5 in { + def : Pat<(nxv16i8 (AArch64mul_p nxv16i1:$Op1, nxv16i8:$Op2, (nxv16i8 (splat_vector (i32 -1))))), + (NEG_ZPmZ_B_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_p nxv8i1:$Op1, nxv8i16:$Op2, (nxv8i16 (splat_vector (i32 -1))))), + (NEG_ZPmZ_H_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_p nxv4i1:$Op1, nxv4i32:$Op2, (nxv4i32 (splat_vector (i32 -1))))), + (NEG_ZPmZ_S_UNDEF $Op2, $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_p nxv2i1:$Op1, nxv2i64:$Op2, (nxv2i64 (splat_vector (i64 -1))))), + (NEG_ZPmZ_D_UNDEF $Op2, $Op1, $Op2)>; + } + + def : Pat<(nxv16i8 (AArch64mul_m1 nxv16i1:$Op1, (nxv16i8 (splat_vector (i32 -1))), nxv16i8:$Op2)), + 
(NEG_ZPmZ_B (DUP_ZI_B -1, 0), $Op1, $Op2)>; + def : Pat<(nxv8i16 (AArch64mul_m1 nxv8i1:$Op1, (nxv8i16 (splat_vector (i32 -1))), nxv8i16:$Op2)), + (NEG_ZPmZ_H (DUP_ZI_H -1, 0), $Op1, $Op2)>; + def : Pat<(nxv4i32 (AArch64mul_m1 nxv4i1:$Op1, (nxv4i32 (splat_vector (i32 -1))), nxv4i32:$Op2)), + (NEG_ZPmZ_S (DUP_ZI_S -1, 0), $Op1, $Op2)>; + def : Pat<(nxv2i64 (AArch64mul_m1 nxv2i1:$Op1, (nxv2i64 (splat_vector (i64 -1))), nxv2i64:$Op2)), + (NEG_ZPmZ_D (DUP_ZI_D -1, 0), $Op1, $Op2)>; } // End HasSVE_or_SME // COMPACT - word and doubleword diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 8c4b4f6..50a8754 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -5632,75 +5632,94 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost( TTI::PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp, TTI::TargetCostKind CostKind) const { InstructionCost Invalid = InstructionCost::getInvalid(); - InstructionCost Cost(TTI::TCC_Basic); if (CostKind != TTI::TCK_RecipThroughput) return Invalid; - // Sub opcodes currently only occur in chained cases. - // Independent partial reduction subtractions are still costed as an add + if (VF.isFixed() && !ST->isSVEorStreamingSVEAvailable() && + (!ST->isNeonAvailable() || !ST->hasDotProd())) + return Invalid; + if ((Opcode != Instruction::Add && Opcode != Instruction::Sub) || OpAExtend == TTI::PR_None) return Invalid; + assert((BinOp || (OpBExtend == TTI::PR_None && !InputTypeB)) && + (!BinOp || (OpBExtend != TTI::PR_None && InputTypeB)) && + "Unexpected values for OpBExtend or InputTypeB"); + // We only support multiply binary operations for now, and for muls we // require the types being extended to be the same. - // NOTE: For muls AArch64 supports lowering mixed extensions to a usdot but - // only if the i8mm or sve/streaming features are available. - if (BinOp && (*BinOp != Instruction::Mul || InputTypeA != InputTypeB || - OpBExtend == TTI::PR_None || - (OpAExtend != OpBExtend && !ST->hasMatMulInt8() && - !ST->isSVEorStreamingSVEAvailable()))) + if (BinOp && (*BinOp != Instruction::Mul || InputTypeA != InputTypeB)) return Invalid; - assert((BinOp || (OpBExtend == TTI::PR_None && !InputTypeB)) && - "Unexpected values for OpBExtend or InputTypeB"); - EVT InputEVT = EVT::getEVT(InputTypeA); - EVT AccumEVT = EVT::getEVT(AccumType); + bool IsUSDot = OpBExtend != TTI::PR_None && OpAExtend != OpBExtend; + if (IsUSDot && !ST->hasMatMulInt8()) + return Invalid; + + unsigned Ratio = + AccumType->getScalarSizeInBits() / InputTypeA->getScalarSizeInBits(); + if (VF.getKnownMinValue() <= Ratio) + return Invalid; + + VectorType *InputVectorType = VectorType::get(InputTypeA, VF); + VectorType *AccumVectorType = + VectorType::get(AccumType, VF.divideCoefficientBy(Ratio)); + // We don't yet support all kinds of legalization. + auto TA = TLI->getTypeAction(AccumVectorType->getContext(), + EVT::getEVT(AccumVectorType)); + switch (TA) { + default: + return Invalid; + case TargetLowering::TypeLegal: + case TargetLowering::TypePromoteInteger: + case TargetLowering::TypeSplitVector: + break; + } + + // Check what kind of type-legalization happens.
+ std::pair<InstructionCost, MVT> AccumLT = + getTypeLegalizationCost(AccumVectorType); + std::pair<InstructionCost, MVT> InputLT = + getTypeLegalizationCost(InputVectorType); - unsigned VFMinValue = VF.getKnownMinValue(); + InstructionCost Cost = InputLT.first * TTI::TCC_Basic; - if (VF.isScalable()) { - if (!ST->isSVEorStreamingSVEAvailable()) - return Invalid; + // Prefer using full types by costing half-full input types as more expensive. + if (TypeSize::isKnownLT(InputVectorType->getPrimitiveSizeInBits(), + TypeSize::getScalable(128))) + // FIXME: This can be removed after the cost of the extends are folded into + // the dot-product expression in VPlan, after landing: + // https://github.com/llvm/llvm-project/pull/147302 + Cost *= 2; - // Don't accept a partial reduction if the scaled accumulator is vscale x 1, - // since we can't lower that type. - unsigned Scale = - AccumEVT.getScalarSizeInBits() / InputEVT.getScalarSizeInBits(); - if (VFMinValue == Scale) - return Invalid; + if (ST->isSVEorStreamingSVEAvailable() && !IsUSDot) { + // i16 -> i64 is natively supported for udot/sdot + if (AccumLT.second.getScalarType() == MVT::i64 && + InputLT.second.getScalarType() == MVT::i16) + return Cost; + // i8 -> i64 is supported with an extra level of extends + if (AccumLT.second.getScalarType() == MVT::i64 && + InputLT.second.getScalarType() == MVT::i8) + // FIXME: This cost should probably be a little higher, e.g. Cost + 2 + // because it requires two extra extends on the inputs. But if we'd change + // that now, a regular reduction would be cheaper because the costs of + // the extends in the IR are still counted. This can be fixed + // after https://github.com/llvm/llvm-project/pull/147302 has landed. + return Cost; } - if (VF.isFixed() && - (!ST->isNeonAvailable() || !ST->hasDotProd() || AccumEVT == MVT::i64)) - return Invalid; - if (InputEVT == MVT::i8) { - switch (VFMinValue) { - default: - return Invalid; - case 8: - if (AccumEVT == MVT::i32) - Cost *= 2; - else if (AccumEVT != MVT::i64) - return Invalid; - break; - case 16: - if (AccumEVT == MVT::i64) - Cost *= 2; - else if (AccumEVT != MVT::i32) - return Invalid; - break; - } - } else if (InputEVT == MVT::i16) { - // FIXME: Allow i32 accumulator but increase cost, as we would extend - // it to i64. - if (VFMinValue != 8 || AccumEVT != MVT::i64) - return Invalid; - } else - return Invalid; + // i8 -> i32 is natively supported for udot/sdot/usdot, both for NEON and SVE. + if (ST->isSVEorStreamingSVEAvailable() || + (AccumLT.second.isFixedLengthVector() && ST->isNeonAvailable() && + ST->hasDotProd())) { + if (AccumLT.second.getScalarType() == MVT::i32 && + InputLT.second.getScalarType() == MVT::i8) + return Cost; + } - return Cost; + // Add additional cost for the extends that would need to be inserted. 
+ return Cost + 4; } InstructionCost diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td index 7003a40..9446144 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPU.td +++ b/llvm/lib/Target/AMDGPU/AMDGPU.td @@ -2126,6 +2126,8 @@ def FeatureISAVersion12_50 : FeatureSet< FeatureLdsBarrierArriveAtomic, FeatureSetPrioIncWgInst, Feature45BitNumRecordsBufferResource, + FeatureSupportsXNACK, + FeatureXNACK, ]>; def FeatureISAVersion12_51 : FeatureSet< diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp index f01d5f6..a4ef524 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp @@ -647,7 +647,7 @@ public: ModuleScopeVariables.insert(GV); } else if (K.second.size() == 1) { KernelAccessVariables.insert(GV); - } else if (set_is_subset(K.second, HybridModuleRootKernels)) { + } else if (K.second == HybridModuleRootKernels) { ModuleScopeVariables.insert(GV); } else { TableLookupVariables.insert(GV); diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp index 2d5ae29..2120bf8 100644 --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -2303,7 +2303,10 @@ Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1( KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n'; if (!hasArchitectedFlatScratch()) KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n'; - KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n'; + bool ReservedXnackMask = STI.hasFeature(AMDGPU::FeatureXNACK); + assert(!ReservedXnackMask || STI.hasFeature(AMDGPU::FeatureSupportsXNACK)); + KdStream << Indent << ".amdhsa_reserve_xnack_mask " << ReservedXnackMask + << '\n'; KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n"; CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY); diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index fed3778..82789bc 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -722,7 +722,8 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const { return false; } - if (!MRI->constrainRegClass(New->getReg(), ConstrainRC)) { + if (New->getReg().isVirtual() && + !MRI->constrainRegClass(New->getReg(), ConstrainRC)) { LLVM_DEBUG(dbgs() << "Cannot constrain " << printReg(New->getReg(), TRI) << TRI->getRegClassName(ConstrainRC) << '\n'); return false; diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp index e4b3528..0189e7b 100644 --- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp @@ -306,7 +306,8 @@ class PrologEpilogSGPRSpillBuilder { buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR, FI, FrameReg, DwordOff); - MRI.constrainRegClass(SubReg, &AMDGPU::SReg_32_XM0RegClass); + assert(SubReg.isPhysical()); + BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg) .addReg(TmpVGPR, RegState::Kill); DwordOff += 4; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index f7265c5..79876ff 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -18879,7 +18879,7 @@ bool SITargetLowering::checkForPhysRegDependency( PhysReg = AMDGPU::SCC; const 
TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg, Def->getSimpleValueType(ResNo)); - Cost = RC->getCopyCost(); + Cost = RC->expensiveOrImpossibleToCopy() ? -1 : RC->getCopyCost(); return true; } return false; diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 76bfce8..5e27b37 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -1013,6 +1013,15 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { } } } else if (T == X_CNT) { + WaitEventType OtherEvent = E == SMEM_GROUP ? VMEM_GROUP : SMEM_GROUP; + if (PendingEvents & (1 << OtherEvent)) { + // Hardware inserts an implicit xcnt between interleaved + // SMEM and VMEM operations. So there will never be + // outstanding address translations for both SMEM and + // VMEM at the same time. + setScoreLB(T, CurrScore - 1); + PendingEvents &= ~(1 << OtherEvent); + } for (const MachineOperand &Op : Inst.all_uses()) setScoreByOperand(&Inst, Op, T, CurrScore); } else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ { @@ -2220,6 +2229,8 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst, // Now look at the instruction opcode. If it is a memory access // instruction, update the upper-bound of the appropriate counter's // bracket and the destination operand scores. + // For architectures with X_CNT, mark the source address operands + // with the appropriate counter values. // TODO: Use the (TSFlags & SIInstrFlags::DS_CNT) property everywhere. bool IsVMEMAccess = false; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 56435a5..cda8069 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -2112,8 +2112,6 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case AMDGPU::SI_RESTORE_S32_FROM_VGPR: MI.setDesc(get(AMDGPU::V_READLANE_B32)); - MI.getMF()->getRegInfo().constrainRegClass(MI.getOperand(0).getReg(), - &AMDGPU::SReg_32_XM0RegClass); break; case AMDGPU::AV_MOV_B32_IMM_PSEUDO: { Register Dst = MI.getOperand(0).getReg(); @@ -8117,21 +8115,14 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist, // hope for the best. if (Inst.isCopy() && DstReg.isPhysical() && RI.isVGPR(MRI, Inst.getOperand(1).getReg())) { - // TODO: Only works for 32 bit registers. 
- if (MRI.constrainRegClass(DstReg, &AMDGPU::SReg_32_XM0RegClass)) { - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), - get(AMDGPU::V_READFIRSTLANE_B32), DstReg) - .add(Inst.getOperand(1)); - } else { - Register NewDst = - MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), - get(AMDGPU::V_READFIRSTLANE_B32), NewDst) - .add(Inst.getOperand(1)); - BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), - DstReg) - .addReg(NewDst); - } + Register NewDst = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); + BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), + get(AMDGPU::V_READFIRSTLANE_B32), NewDst) + .add(Inst.getOperand(1)); + BuildMI(*Inst.getParent(), &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), + DstReg) + .addReg(NewDst); + Inst.eraseFromParent(); return; } diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 205237f..3c2dd42 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -2222,8 +2222,6 @@ bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, // Don't need to write VGPR out. } - MachineRegisterInfo &MRI = MI->getMF()->getRegInfo(); - // Restore clobbered registers in the specified restore block. MI = RestoreMBB.end(); SB.setMI(&RestoreMBB, MI); @@ -2238,7 +2236,8 @@ bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, SB.NumSubRegs == 1 ? SB.SuperReg : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); - MRI.constrainRegClass(SubReg, &AMDGPU::SReg_32_XM0RegClass); + + assert(SubReg.isPhysical()); bool LastSubReg = (i + 1 == e); auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), SubReg) @@ -3059,8 +3058,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, if (IsSALU && LiveSCC) { Register NewDest; if (IsCopy) { - MF->getRegInfo().constrainRegClass(ResultReg, - &AMDGPU::SReg_32_XM0RegClass); + assert(ResultReg.isPhysical()); NewDest = ResultReg; } else { NewDest = RS->scavengeRegisterBackwards(AMDGPU::SReg_32_XM0RegClass, @@ -3190,8 +3188,6 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, Register NewDest; if (IsCopy) { - MF->getRegInfo().constrainRegClass(ResultReg, - &AMDGPU::SReg_32_XM0RegClass); NewDest = ResultReg; } else { NewDest = RS->scavengeRegisterBackwards( diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td index 8f1dd62..5630580 100644 --- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td +++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td @@ -1163,6 +1163,22 @@ def VS_64_Lo256 : SIRegisterClass<"AMDGPU", VReg_64.RegTypes, 32, let HasSGPR = 1; let Size = 64; } + +def VS_128 : SIRegisterClass<"AMDGPU", VReg_128.RegTypes, 32, + (add VReg_128, SReg_128)> { + let isAllocatable = 0; + let HasVGPR = 1; + let HasSGPR = 1; + let Size = 128; +} + +def VS_128_Align2 : SIRegisterClass<"AMDGPU", VReg_128.RegTypes, 32, + (add VReg_128_Align2, SReg_128)> { + let isAllocatable = 0; + let HasVGPR = 1; + let HasSGPR = 1; + let Size = 128; +} } // End GeneratePressureSet = 0 // Define a register tuple class, along with one requiring an even diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td index b3fd8c7..84287b6 100644 --- a/llvm/lib/Target/AMDGPU/SOPInstructions.td +++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td @@ -352,10 +352,12 @@ def S_XNOR_SAVEEXEC_B64 : SOP1_64 <"s_xnor_saveexec_b64">; } 
// End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] +let Defs = [SCC] in { def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32", [(set i32:$sdst, (int_amdgcn_s_quadmask i32:$src0))]>; def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64", [(set i64:$sdst, (int_amdgcn_s_quadmask i64:$src0))]>; +} let Uses = [M0] in { def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index fa130a1..26ff54c 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -775,6 +775,16 @@ class VectorType; bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override; + /// Return true if it is profitable to fold a pair of shifts into a mask. + bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { + EVT VT = Y.getValueType(); + + if (VT.isVector()) + return false; + + return VT.getScalarSizeInBits() <= 32; + } + bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT, unsigned SelectOpcode, SDValue X, SDValue Y) const override; diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp index d0dfa47..0040504 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp @@ -449,6 +449,7 @@ HexagonTargetLowering::initializeHVXLowering() { // Include cases which are not hander earlier setOperationAction(ISD::UINT_TO_FP, MVT::v32i1, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v64i1, Custom); + setOperationAction(ISD::SINT_TO_FP, MVT::v32i1, Custom); setTargetDAGCombine({ISD::CONCAT_VECTORS, ISD::TRUNCATE, ISD::VSELECT}); } @@ -2337,7 +2338,7 @@ HexagonTargetLowering::LowerHvxFpToInt(SDValue Op, SelectionDAG &DAG) const { return ExpandHvxFpToInt(Op, DAG); } -// For vector type v32i1 uint_to_fp to v32f32: +// For vector type v32i1 uint_to_fp/sint_to_fp to v32f32: // R1 = #1, R2 holds the v32i1 param // V1 = vsplat(R1) // V2 = vsplat(R2) @@ -2464,7 +2465,7 @@ HexagonTargetLowering::LowerHvxIntToFp(SDValue Op, SelectionDAG &DAG) const { MVT IntTy = ty(Op.getOperand(0)).getVectorElementType(); MVT FpTy = ResTy.getVectorElementType(); - if (Op.getOpcode() == ISD::UINT_TO_FP) { + if (Op.getOpcode() == ISD::UINT_TO_FP || Op.getOpcode() == ISD::SINT_TO_FP) { if (ResTy == MVT::v32f32 && ty(Op.getOperand(0)) == MVT::v32i1) return LowerHvxPred32ToFp(Op, DAG); if (ResTy == MVT::v64f16 && ty(Op.getOperand(0)) == MVT::v64i1) diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 4cfbfca..7ddf996 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -2860,8 +2860,7 @@ static SDValue fillSubVectorFromBuildVector(BuildVectorSDNode *Node, EVT ResTy, unsigned first) { unsigned NumElts = ResTy.getVectorNumElements(); - assert(first >= 0 && - first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements()); + assert(first + NumElts <= Node->getSimpleValueType(0).getVectorNumElements()); SmallVector<SDValue, 16> Ops(Node->op_begin() + first, Node->op_begin() + first + NumElts); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 3ac7c28..8c21746 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -638,6 +638,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // 
No support for these operations with v2f32/v2i32 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand); setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand); + + setOperationAction(ISD::TRUNCATE, MVT::v2i16, Expand); + setOperationAction({ISD::ANY_EXTEND, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND}, + MVT::v2i32, Expand); + // Need custom lowering in case the index is dynamic. if (STI.hasF32x2Instructions()) setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 395d2c4..662d3f6 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -629,7 +629,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2, G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH, - G_FTANH}) + G_FTANH, G_FMODF}) .libcallFor({s32, s64}) .libcallFor(ST.is64Bit(), {s128}); getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP}) diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td index 19d5aff..cf6f83a 100644 --- a/llvm/lib/Target/RISCV/RISCVGISel.td +++ b/llvm/lib/Target/RISCV/RISCVGISel.td @@ -110,16 +110,16 @@ def : StPat<truncstorei8, SB, GPR, i16>; let Predicates = [HasAtomicLdSt] in { // Prefer unsigned due to no c.lb in Zcb. - def : LdPat<atomic_load_aext_8, LBU, i16>; - def : LdPat<atomic_load_nonext_16, LH, i16>; + def : LdPat<relaxed_load<atomic_load_aext_8>, LBU, i16>; + def : LdPat<relaxed_load<atomic_load_nonext_16>, LH, i16>; - def : StPat<atomic_store_8, SB, GPR, i16>; - def : StPat<atomic_store_16, SH, GPR, i16>; + def : StPat<relaxed_store<atomic_store_8>, SB, GPR, i16>; + def : StPat<relaxed_store<atomic_store_16>, SH, GPR, i16>; } let Predicates = [HasAtomicLdSt, IsRV64] in { - def : LdPat<atomic_load_nonext_32, LW, i32>; - def : StPat<atomic_store_32, SW, GPR, i32>; + // Load pattern is in RISCVInstrInfoA.td and shared with RV32. 
+ def : StPat<relaxed_store<atomic_store_32>, SW, GPR, i32>; } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 50649cf..dcce2d2 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -533,7 +533,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction({ISD::FREM, ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN, ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FLOG, ISD::FLOG2, - ISD::FLOG10, ISD::FLDEXP, ISD::FFREXP}, + ISD::FLOG10, ISD::FLDEXP, ISD::FFREXP, ISD::FMODF}, MVT::f16, Promote); // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td index 99992d1..25accd9 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td @@ -174,15 +174,14 @@ let Predicates = [HasAtomicLdSt] in { def : StPat<relaxed_store<atomic_store_8>, SB, GPR, XLenVT>; def : StPat<relaxed_store<atomic_store_16>, SH, GPR, XLenVT>; def : StPat<relaxed_store<atomic_store_32>, SW, GPR, XLenVT>; -} -let Predicates = [HasAtomicLdSt, IsRV32] in { - def : LdPat<relaxed_load<atomic_load_nonext_32>, LW>; + // Used by GISel for RV32 and RV64. + def : LdPat<relaxed_load<atomic_load_nonext_32>, LW, i32>; } let Predicates = [HasAtomicLdSt, IsRV64] in { - def : LdPat<relaxed_load<atomic_load_asext_32>, LW>; - def : LdPat<relaxed_load<atomic_load_zext_32>, LWU>; + def : LdPat<relaxed_load<atomic_load_asext_32>, LW, i64>; + def : LdPat<relaxed_load<atomic_load_zext_32>, LWU, i64>; def : LdPat<relaxed_load<atomic_load_nonext_64>, LD, i64>; def : StPat<relaxed_store<atomic_store_64>, SD, GPR, i64>; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index d998316..298d35a 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -554,7 +554,8 @@ defset list<VTypeInfoToWide> AllWidenableBF16ToFloatVectors = { // This represents the information we need in codegen for each pseudo. // The definition should be consistent with `struct PseudoInfo` in // RISCVInstrInfo.h. -class RISCVVPseudo<dag outs, dag ins, list<dag> pattern = [], string opcodestr = "", string argstr = ""> +class RISCVVPseudo<dag outs, dag ins, list<dag> pattern = [], + string opcodestr = "", string argstr = ""> : Pseudo<outs, ins, pattern, opcodestr, argstr> { Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key. 
Instruction BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); @@ -1010,8 +1011,7 @@ class VPseudoNullaryNoMask<VReg RegClass> : class VPseudoNullaryMask<VReg RegClass> : RISCVVPseudo<(outs GetVRegNoV0<RegClass>.R:$rd), (ins GetVRegNoV0<RegClass>.R:$passthru, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1190,8 +1190,7 @@ class VPseudoBinaryNoMask<VReg RetClass, bits<2> TargetConstraintType = 1, DAGOperand sewop = sew> : RISCVVPseudo<(outs RetClass:$rd), - (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew), - []> { + (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1227,8 +1226,7 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass, bits<2> TargetConstraintType = 1> : RISCVVPseudo<(outs RetClass:$rd), (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, - vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1320,7 +1318,7 @@ class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL, bit Ordered>: RISCVVPseudo<(outs), (ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2, - AVL:$vl, sew:$sew),[]>, + AVL:$vl, sew:$sew)>, RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> { let mayLoad = 0; let mayStore = 1; @@ -1333,7 +1331,7 @@ class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL, bit Ordered>: RISCVVPseudo<(outs), (ins StClass:$rd, GPRMemZeroOffset:$rs1, IdxClass:$rs2, - VMaskOp:$vm, AVL:$vl, sew:$sew),[]>, + VMaskOp:$vm, AVL:$vl, sew:$sew)>, RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> { let mayLoad = 0; let mayStore = 1; @@ -1351,8 +1349,7 @@ class VPseudoBinaryMaskPolicy<VReg RetClass, RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd), (ins GetVRegNoV0<RetClass>.R:$passthru, Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1371,8 +1368,7 @@ class VPseudoTernaryMaskPolicy<VReg RetClass, RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd), (ins GetVRegNoV0<RetClass>.R:$passthru, Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1414,8 +1410,7 @@ class VPseudoBinaryMOutMask<VReg RetClass, RISCVVPseudo<(outs RetClass:$rd), (ins RetClass:$passthru, Op1Class:$rs2, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1438,8 +1433,7 @@ class VPseudoTiedBinaryMask<VReg RetClass, RISCVVPseudo<(outs GetVRegNoV0<RetClass>.R:$rd), (ins GetVRegNoV0<RetClass>.R:$passthru, Op2Class:$rs1, - VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1546,8 +1540,7 @@ class VPseudoTernaryNoMaskWithPolicyRoundingMode<VReg RetClass, bits<2> TargetConstraintType = 1> : RISCVVPseudo<(outs RetClass:$rd), (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2, - vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy), - []> { + 
vec_rm:$rm, AVL:$vl, sew:$sew, vec_policy:$policy)> { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; @@ -1716,8 +1709,8 @@ class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF> : RISCVVPseudo<(outs), - (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl, sew:$sew), - []>, + (ins ValClass:$rd, GPRMemZeroOffset:$rs1, AVL:$vl, + sew:$sew)>, RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> { let mayLoad = 0; let mayStore = 1; @@ -6029,9 +6022,9 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in { PseudoInstExpansion<(CSRRS GPR:$rd, SysRegVLENB.Encoding, X0)>, Sched<[WriteRdVLENB]>; let Defs = [VL, VTYPE] in { - def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd), (ins uimm5:$shamt), - []>, - Sched<[WriteVSETVLI, ReadVSETVLI]>; + def PseudoReadVLENBViaVSETVLIX0 : Pseudo<(outs GPRNoX0:$rd), + (ins uimm5:$shamt), []>, + Sched<[WriteVSETVLI, ReadVSETVLI]>; } } @@ -6694,14 +6687,14 @@ defm PseudoVID : VPseudoVID_V; let Predicates = [HasVInstructions] in { let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { let HasSEWOp = 1, BaseInstr = VMV_X_S in - def PseudoVMV_X_S: + def PseudoVMV_X_S : RISCVVPseudo<(outs GPR:$rd), (ins VR:$rs2, sew:$sew)>, Sched<[WriteVMovXS, ReadVMovXS]>; let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X, isReMaterializable = 1, Constraints = "$rd = $passthru" in - def PseudoVMV_S_X: RISCVVPseudo<(outs VR:$rd), - (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew), - []>, + def PseudoVMV_S_X : + RISCVVPseudo<(outs VR:$rd), + (ins VR:$passthru, GPR:$rs1, AVL:$vl, sew:$sew)>, Sched<[WriteVMovSX, ReadVMovSX_V, ReadVMovSX_X]>; } } // Predicates = [HasVInstructions] @@ -6721,8 +6714,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { Constraints = "$rd = $passthru" in def "PseudoVFMV_S_" # f.FX : RISCVVPseudo<(outs VR:$rd), - (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew), - []>, + (ins VR:$passthru, f.fprclass:$rs1, AVL:$vl, sew:$sew)>, Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>; } } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td index 5e013b4..1674c95 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZalasr.td @@ -63,13 +63,14 @@ defm SD : SRL_r_aq_rl<0b011, "sd">; //===----------------------------------------------------------------------===// class PatLAQ<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT> - : Pat<(vt (OpNode (vt GPRMemZeroOffset:$rs1))), (Inst GPRMemZeroOffset:$rs1)>; + : Pat<(vt (OpNode (XLenVT GPRMemZeroOffset:$rs1))), + (Inst GPRMemZeroOffset:$rs1)>; // n.b. 
this switches order of arguments // to deal with the fact that SRL has addr, data // while atomic_store has data, addr class PatSRL<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT> - : Pat<(OpNode (vt GPR:$rs2), (vt GPRMemZeroOffset:$rs1)), + : Pat<(OpNode (vt GPR:$rs2), (XLenVT GPRMemZeroOffset:$rs1)), (Inst GPRMemZeroOffset:$rs1, GPR:$rs2)>; @@ -97,16 +98,15 @@ let Predicates = [HasStdExtZalasr] in { let Predicates = [HasStdExtZalasr, IsRV32] in { def : PatLAQ<acquiring_load<atomic_load_nonext_32>, LW_AQ>; def : PatLAQ<seq_cst_load<atomic_load_nonext_32>, LW_AQ>; - -} // Predicates = [HasStdExtZalasr, IsRV64] +} // Predicates = [HasStdExtZalasr, IsRV32] let Predicates = [HasStdExtZalasr, IsRV64] in { - def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ>; - def : PatLAQ<seq_cst_load<atomic_load_asext_32>, LW_AQ>; + def : PatLAQ<acquiring_load<atomic_load_asext_32>, LW_AQ, i64>; + def : PatLAQ<seq_cst_load<atomic_load_asext_32>, LW_AQ, i64>; - def : PatLAQ<acquiring_load<atomic_load_nonext_64>, LD_AQ>; - def : PatLAQ<seq_cst_load<atomic_load_nonext_64>, LD_AQ>; + def : PatLAQ<acquiring_load<atomic_load_nonext_64>, LD_AQ, i64>; + def : PatLAQ<seq_cst_load<atomic_load_nonext_64>, LD_AQ, i64>; - def : PatSRL<releasing_store<atomic_store_64>, SD_RL>; - def : PatSRL<seq_cst_store<atomic_store_64>, SD_RL>; + def : PatSRL<releasing_store<atomic_store_64>, SD_RL, i64>; + def : PatSRL<seq_cst_store<atomic_store_64>, SD_RL, i64>; } // Predicates = [HasStdExtZalasr, IsRV64] diff --git a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp index 6c19049..024030d 100644 --- a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp @@ -206,8 +206,8 @@ Filler::findDelayInstr(MachineBasicBlock &MBB, if (!done) --I; - // skip debug instruction - if (I->isDebugInstr()) + // Skip meta instructions. + if (I->isMetaInstruction()) continue; if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() || diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index 143c4c4..e7709ef 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -149,6 +149,10 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI, }); } + getActionDefinitionsBuilder({G_UMIN, G_UMAX, G_SMIN, G_SMAX}) + .widenScalarToNextPow2(0, /*Min=*/32) + .lower(); + // integer addition/subtraction getActionDefinitionsBuilder({G_ADD, G_SUB}) .legalFor({s8, s16, s32}) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index cda5568..02b20b3 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -45457,7 +45457,8 @@ static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src, const SDLoc &DL, const X86Subtarget &Subtarget) { EVT SrcVT = Src.getValueType(); - if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1) + if (Subtarget.useSoftFloat() || !SrcVT.isSimple() || + SrcVT.getScalarType() != MVT::i1) return SDValue(); // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type @@ -58134,6 +58135,14 @@ static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget)) return V; + // Prefer VSHLI to reduce uses, X86FixupInstTunings may revert this depending + // on the scheduler model. 
Limit multiple users to AVX+ targets to prevent + // introducing extra register moves. + if (Op0 == Op1 && supportedVectorShiftWithImm(VT, Subtarget, ISD::SHL)) + if (Subtarget.hasAVX() || N->isOnlyUserOf(Op0.getNode())) + return getTargetVShiftByConstNode(X86ISD::VSHLI, DL, VT.getSimpleVT(), + Op0, 1, DAG); + // Canonicalize hidden LEA pattern: // Fold (add (sub (shl x, c), y), z) -> (sub (add (shl x, c), z), y) // iff c < 4 diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp index 278ae46..0ba71ad 100644 --- a/llvm/lib/Target/X86/X86LowerAMXType.cpp +++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp @@ -854,6 +854,7 @@ public: : Func(F), SC(ShapeC), DT(nullptr) {} bool combineCastStore(IntrinsicInst *Cast, StoreInst *ST); bool combineLoadCast(IntrinsicInst *Cast, LoadInst *LD); + bool combineTilezero(IntrinsicInst *Cast); bool combineLdSt(SmallVectorImpl<Instruction *> &Casts); bool combineAMXcast(TargetLibraryInfo *TLI); bool transformAMXCast(IntrinsicInst *AMXCast); @@ -1175,6 +1176,26 @@ bool X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) { return EraseLoad; } +// %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer) +// --> +// %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col) +bool X86LowerAMXCast::combineTilezero(IntrinsicInst *Cast) { + Value *Row = nullptr, *Col = nullptr; + Use &U = *(Cast->use_begin()); + unsigned OpNo = U.getOperandNo(); + auto *II = cast<IntrinsicInst>(U.getUser()); + if (!isAMXIntrinsic(II)) + return false; + + std::tie(Row, Col) = SC->getShape(II, OpNo); + + IRBuilder<> Builder(Cast); + Value *NewInst = + Builder.CreateIntrinsic(Intrinsic::x86_tilezero_internal, {}, {Row, Col}); + Cast->replaceAllUsesWith(NewInst); + return true; +} + bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) { bool Change = false; for (auto *Cast : Casts) { @@ -1198,6 +1219,14 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) { for (auto *Store : DeadStores) Store->eraseFromParent(); } else { // x86_cast_vector_to_tile + // %19 = tail call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> zeroinitializer) + // --> + // %19 = tail call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 %col) + if (isa<ConstantAggregateZero>(Cast->getOperand(0))) { + Change |= combineTilezero(cast<IntrinsicInst>(Cast)); + continue; + } + auto *Load = dyn_cast<LoadInst>(Cast->getOperand(0)); if (!Load || !Load->hasOneUse()) continue; @@ -1210,6 +1239,7 @@ bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) { // Set the operand is null so that load instruction can be erased. 
Cast->setOperand(0, nullptr); Load->eraseFromParent(); + Change = true; } } } diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp index c4f1b68..ddb95a4 100644 --- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp +++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp @@ -3981,7 +3981,6 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones( void ModuleCallsiteContextGraph::updateAllocationCall( CallInfo &Call, AllocationType AllocType) { std::string AllocTypeString = getAllocTypeAttributeString(AllocType); - removeAnyExistingAmbiguousAttribute(cast<CallBase>(Call.call())); auto A = llvm::Attribute::get(Call.call()->getFunction()->getContext(), "memprof", AllocTypeString); cast<CallBase>(Call.call())->addFnAttr(A); @@ -5643,7 +5642,6 @@ bool MemProfContextDisambiguation::applyImport(Module &M) { // clone J-1 (J==0 is the original clone and does not have a VMaps // entry). CBClone = cast<CallBase>((*VMaps[J - 1])[CB]); - removeAnyExistingAmbiguousAttribute(CBClone); CBClone->addFnAttr(A); ORE.emit(OptimizationRemark(DEBUG_TYPE, "MemprofAttribute", CBClone) << ore::NV("AllocationCall", CBClone) << " in clone " diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp index e4cb4574..07ad65c 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -5780,6 +5780,45 @@ Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I, return nullptr; } +/// Match and fold patterns like: +/// icmp eq/ne X, min(max(X, Lo), Hi) +/// which represents a range check and can be represented as a ConstantRange. +/// +/// For icmp eq, build ConstantRange [Lo, Hi + 1) and convert to: +/// (X - Lo) u< (Hi + 1 - Lo) +/// For icmp ne, build ConstantRange [Hi + 1, Lo) and convert to: +/// (X - (Hi + 1)) u< (Lo - (Hi + 1)) +Instruction *InstCombinerImpl::foldICmpWithClamp(ICmpInst &I, Value *X, + MinMaxIntrinsic *Min) { + if (!I.isEquality() || !Min->hasOneUse() || !Min->isMin()) + return nullptr; + + const APInt *Lo = nullptr, *Hi = nullptr; + if (Min->isSigned()) { + if (!match(Min->getLHS(), m_OneUse(m_SMax(m_Specific(X), m_APInt(Lo)))) || + !match(Min->getRHS(), m_APInt(Hi)) || !Lo->slt(*Hi)) + return nullptr; + } else { + if (!match(Min->getLHS(), m_OneUse(m_UMax(m_Specific(X), m_APInt(Lo)))) || + !match(Min->getRHS(), m_APInt(Hi)) || !Lo->ult(*Hi)) + return nullptr; + } + + ConstantRange CR = ConstantRange::getNonEmpty(*Lo, *Hi + 1); + ICmpInst::Predicate Pred; + APInt C, Offset; + if (I.getPredicate() == ICmpInst::ICMP_EQ) + CR.getEquivalentICmp(Pred, C, Offset); + else + CR.inverse().getEquivalentICmp(Pred, C, Offset); + + if (!Offset.isZero()) + X = Builder.CreateAdd(X, ConstantInt::get(X->getType(), Offset)); + + return replaceInstUsesWith( + I, Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), C))); +} + // Canonicalize checking for a power-of-2-or-zero value: static Instruction *foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder) { @@ -7467,10 +7506,14 @@ Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred, if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI)) return NI; - if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) + if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0)) { if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred)) return Res; + if (Instruction *Res = foldICmpWithClamp(CxtI,
Op1, MinMax)) + return Res; + } + { Value *X; const APInt *C; @@ -8527,6 +8570,9 @@ static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, DenormalMode::getIEEE()) { CI.replaceOperand(I, 0, X); CI.replaceOperand(I, 1, Y); + I.setHasNoInfs(LHSI->hasNoInfs()); + if (LHSI->hasNoNaNs()) + I.setHasNoNaNs(true); return &I; } break; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h index 4f94aa2..e01c145 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -725,6 +725,7 @@ public: Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ); Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred); + Instruction *foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min); Instruction *foldICmpEquality(ICmpInst &Cmp); Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I); Instruction *foldSignBitTest(ICmpInst &I); diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index 26e17cc..b9b5b58 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -2287,6 +2287,35 @@ bool GVNPass::processLoad(LoadInst *L) { return true; } +// Attempt to process masked loads which have loaded from +// masked stores with the same mask +bool GVNPass::processMaskedLoad(IntrinsicInst *I) { + if (!MD) + return false; + MemDepResult Dep = MD->getDependency(I); + Instruction *DepInst = Dep.getInst(); + if (!DepInst || !Dep.isLocal() || !Dep.isDef()) + return false; + + Value *Mask = I->getOperand(2); + Value *Passthrough = I->getOperand(3); + Value *StoreVal; + if (!match(DepInst, m_MaskedStore(m_Value(StoreVal), m_Value(), m_Value(), + m_Specific(Mask))) || + StoreVal->getType() != I->getType()) + return false; + + // Remove the load but generate a select for the passthrough + Value *OpToForward = llvm::SelectInst::Create(Mask, StoreVal, Passthrough, "", + I->getIterator()); + + ICF->removeUsersOf(I); + I->replaceAllUsesWith(OpToForward); + salvageAndRemoveInstruction(I); + ++NumGVNLoad; + return true; +} + /// Return a pair the first field showing the value number of \p Exp and the /// second field showing whether it is a value number newly created. std::pair<uint32_t, bool> @@ -2734,6 +2763,10 @@ bool GVNPass::processInstruction(Instruction *I) { return false; } + if (match(I, m_Intrinsic<Intrinsic::masked_load>()) && + processMaskedLoad(cast<IntrinsicInst>(I))) + return true; + // For conditional branches, we can perform simple conditional propagation on // the condition value itself. if (BranchInst *BI = dyn_cast<BranchInst>(I)) { diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp index 735bad1..e1dcaa85 100644 --- a/llvm/lib/Transforms/Utils/LoopPeel.cpp +++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -883,84 +883,6 @@ void llvm::computePeelCount(Loop *L, unsigned LoopSize, } } -struct WeightInfo { - // Weights for current iteration. - SmallVector<uint32_t> Weights; - // Weights to subtract after each iteration. - const SmallVector<uint32_t> SubWeights; -}; - -/// Update the branch weights of an exiting block of a peeled-off loop -/// iteration. -/// Let F is a weight of the edge to continue (fallthrough) into the loop. -/// Let E is a weight of the edge to an exit. -/// F/(F+E) is a probability to go to loop and E/(F+E) is a probability to -/// go to exit. 
-/// Then, Estimated ExitCount = F / E. -/// For I-th (counting from 0) peeled off iteration we set the weights for -/// the peeled exit as (EC - I, 1). It gives us reasonable distribution, -/// The probability to go to exit 1/(EC-I) increases. At the same time -/// the estimated exit count in the remainder loop reduces by I. -/// To avoid dealing with division rounding we can just multiple both part -/// of weights to E and use weight as (F - I * E, E). -static void updateBranchWeights(Instruction *Term, WeightInfo &Info) { - setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false); - for (auto [Idx, SubWeight] : enumerate(Info.SubWeights)) - if (SubWeight != 0) - // Don't set the probability of taking the edge from latch to loop header - // to less than 1:1 ratio (meaning Weight should not be lower than - // SubWeight), as this could significantly reduce the loop's hotness, - // which would be incorrect in the case of underestimating the trip count. - Info.Weights[Idx] = - Info.Weights[Idx] > SubWeight - ? std::max(Info.Weights[Idx] - SubWeight, SubWeight) - : SubWeight; -} - -/// Initialize the weights for all exiting blocks. -static void initBranchWeights(DenseMap<Instruction *, WeightInfo> &WeightInfos, - Loop *L) { - SmallVector<BasicBlock *> ExitingBlocks; - L->getExitingBlocks(ExitingBlocks); - for (BasicBlock *ExitingBlock : ExitingBlocks) { - Instruction *Term = ExitingBlock->getTerminator(); - SmallVector<uint32_t> Weights; - if (!extractBranchWeights(*Term, Weights)) - continue; - - // See the comment on updateBranchWeights() for an explanation of what we - // do here. - uint32_t FallThroughWeights = 0; - uint32_t ExitWeights = 0; - for (auto [Succ, Weight] : zip(successors(Term), Weights)) { - if (L->contains(Succ)) - FallThroughWeights += Weight; - else - ExitWeights += Weight; - } - - // Don't try to update weights for degenerate case. - if (FallThroughWeights == 0) - continue; - - SmallVector<uint32_t> SubWeights; - for (auto [Succ, Weight] : zip(successors(Term), Weights)) { - if (!L->contains(Succ)) { - // Exit weights stay the same. - SubWeights.push_back(0); - continue; - } - - // Subtract exit weights on each iteration, distributed across all - // fallthrough edges. - double W = (double)Weight / (double)FallThroughWeights; - SubWeights.push_back((uint32_t)(ExitWeights * W)); - } - - WeightInfos.insert({Term, {std::move(Weights), std::move(SubWeights)}}); - } -} - /// Clones the body of the loop L, putting it between \p InsertTop and \p /// InsertBot. /// \param IterNumber The serial number of the iteration currently being @@ -1332,11 +1254,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, Instruction *LatchTerm = cast<Instruction>(cast<BasicBlock>(Latch)->getTerminator()); - // If we have branch weight information, we'll want to update it for the - // newly created branches. - DenseMap<Instruction *, WeightInfo> Weights; - initBranchWeights(Weights, L); - // Identify what noalias metadata is inside the loop: if it is inside the // loop, the associated metadata must be cloned for each iteration. 
SmallVector<MDNode *, 6> LoopLocalNoAliasDeclScopes; @@ -1382,11 +1299,6 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, assert(DT.verify(DominatorTree::VerificationLevel::Fast)); #endif - for (auto &[Term, Info] : Weights) { - auto *TermCopy = cast<Instruction>(VMap[Term]); - updateBranchWeights(TermCopy, Info); - } - // Remove Loop metadata from the latch branch instruction // because it is not the Loop's latch branch anymore. auto *LatchTermCopy = cast<Instruction>(VMap[LatchTerm]); @@ -1426,15 +1338,38 @@ bool llvm::peelLoop(Loop *L, unsigned PeelCount, bool PeelLast, LoopInfo *LI, } } - for (const auto &[Term, Info] : Weights) { - setBranchWeights(*Term, Info.Weights, /*IsExpected=*/false); - } - // Update Metadata for count of peeled off iterations. unsigned AlreadyPeeled = 0; if (auto Peeled = getOptionalIntLoopAttribute(L, PeeledCountMetaData)) AlreadyPeeled = *Peeled; - addStringMetadataToLoop(L, PeeledCountMetaData, AlreadyPeeled + PeelCount); + unsigned TotalPeeled = AlreadyPeeled + PeelCount; + addStringMetadataToLoop(L, PeeledCountMetaData, TotalPeeled); + + // Update metadata for the estimated trip count. The original branch weight + // metadata is already correct for both the remaining loop and the peeled loop + // iterations, so do not adjust it. + // + // For example, consider what happens when peeling 2 iterations from a loop + // with an estimated trip count of 10 and inserting them before the remaining + // loop. Each of the peeled iterations and each iteration in the remaining + // loop still has the same probability of exiting the *entire original* loop + // as it did when in the original loop, and thus it should still have the same + // branch weights. The peeled iterations' non-zero probabilities of exiting + // already appropriately reduce the probability of reaching the remaining + // iterations just as they did in the original loop. Trying to also adjust + // the remaining loop's branch weights to reflect its new trip count of 8 will + // erroneously further reduce its block frequencies. However, in case an + // analysis later needs to determine the trip count of the remaining loop + // while examining it in isolation without considering the probability of + // actually reaching it, we store the new trip count as separate metadata. 
+ if (auto EstimatedTripCount = getLoopEstimatedTripCount(L)) { + unsigned EstimatedTripCountNew = *EstimatedTripCount; + if (EstimatedTripCountNew < TotalPeeled) + EstimatedTripCountNew = 0; + else + EstimatedTripCountNew -= TotalPeeled; + setLoopEstimatedTripCount(L, EstimatedTripCountNew); + } if (Loop *ParentLoop = L->getParentLoop()) L = ParentLoop; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 7fa787b..cb6bfb2 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -3903,7 +3903,8 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks( if (VF.isScalar()) continue; - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(*Plan, VF, CostCtx); auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) { @@ -4160,7 +4161,8 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() { // Add on other costs that are modelled in VPlan, but not in the legacy // cost model. - VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind, + *CM.PSE.getSE()); VPRegionBlock *VectorRegion = P->getVectorLoopRegion(); assert(VectorRegion && "Expected to have a vector region!"); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>( @@ -6852,7 +6854,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF) const { - VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE()); InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx); // Now compute and add the VPlan-based cost. @@ -7085,7 +7087,8 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { // simplifications not accounted for in the legacy cost model. If that's the // case, don't trigger the assertion, as the extra simplifications may cause a // different VF to be picked by the VPlan-based cost model. - VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); // Verify that the VPlan-based and legacy cost models agree, except for VPlans // with early exits and plans with additional VPlan simplifications. The @@ -8621,7 +8624,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( // TODO: Enable following transform when the EVL-version of extended-reduction // and mulacc-reduction are implemented. 
if (!CM.foldTailWithEVL()) { - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan, CostCtx, Range); } @@ -8690,7 +8694,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) { Plan->addVF(VF); if (!VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - Plan, + *Plan, [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, @@ -10075,7 +10079,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled; VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM, - CM.CostKind); + CM.CostKind, *CM.PSE.getSE()); if (!ForceVectorization && !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, LVP.getPlanFor(VF.Width), SEL, diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 07b191a..2555ebe 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -1772,7 +1772,8 @@ VPCostContext::getOperandInfo(VPValue *V) const { } InstructionCost VPCostContext::getScalarizationOverhead( - Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF) { + Type *ResultTy, ArrayRef<const VPValue *> Operands, ElementCount VF, + bool AlwaysIncludeReplicatingR) { if (VF.isScalar()) return 0; @@ -1792,7 +1793,11 @@ InstructionCost VPCostContext::getScalarizationOverhead( SmallPtrSet<const VPValue *, 4> UniqueOperands; SmallVector<Type *> Tys; for (auto *Op : Operands) { - if (Op->isLiveIn() || isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op) || + if (Op->isLiveIn() || + (!AlwaysIncludeReplicatingR && + isa<VPReplicateRecipe, VPPredInstPHIRecipe>(Op)) || + (isa<VPReplicateRecipe>(Op) && + cast<VPReplicateRecipe>(Op)->getOpcode() == Instruction::Load) || !UniqueOperands.insert(Op).second) continue; Tys.push_back(toVectorizedTy(Types.inferScalarType(Op), VF)); diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h index fc1a09e..1580a3b 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h +++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h @@ -349,12 +349,14 @@ struct VPCostContext { LoopVectorizationCostModel &CM; SmallPtrSet<Instruction *, 8> SkipCostComputation; TargetTransformInfo::TargetCostKind CostKind; + ScalarEvolution &SE; VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI, const VPlan &Plan, LoopVectorizationCostModel &CM, - TargetTransformInfo::TargetCostKind CostKind) + TargetTransformInfo::TargetCostKind CostKind, + ScalarEvolution &SE) : TTI(TTI), TLI(TLI), Types(Plan), LLVMCtx(Plan.getContext()), CM(CM), - CostKind(CostKind) {} + CostKind(CostKind), SE(SE) {} /// Return the cost for \p UI with \p VF using the legacy cost model as /// fallback until computing the cost of all recipes migrates to VPlan. @@ -374,10 +376,12 @@ struct VPCostContext { /// Estimate the overhead of scalarizing a recipe with result type \p ResultTy /// and \p Operands with \p VF. This is a convenience wrapper for the - /// type-based getScalarizationOverhead API. - InstructionCost getScalarizationOverhead(Type *ResultTy, - ArrayRef<const VPValue *> Operands, - ElementCount VF); + /// type-based getScalarizationOverhead API. If \p AlwaysIncludeReplicatingR + /// is true, always compute the cost of scalarizing replicating operands. 
+ InstructionCost + getScalarizationOverhead(Type *ResultTy, ArrayRef<const VPValue *> Operands, + ElementCount VF, + bool AlwaysIncludeReplicatingR = false); }; /// This class can be used to assign names to VPValues. For VPValues without diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 67b9244..43d61f2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -40,6 +40,7 @@ #include <cassert> using namespace llvm; +using namespace llvm::VPlanPatternMatch; using VectorParts = SmallVector<Value *, 2>; @@ -303,7 +304,6 @@ VPPartialReductionRecipe::computeCost(ElementCount VF, VPRecipeBase *OpR = Op->getDefiningRecipe(); // If the partial reduction is predicated, a select will be operand 0 - using namespace llvm::VPlanPatternMatch; if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) { OpR = Op->getDefiningRecipe(); } @@ -1963,7 +1963,6 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF, Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF); VPValue *Op0, *Op1; - using namespace llvm::VPlanPatternMatch; if (!ScalarCond && ScalarTy->getScalarSizeInBits() == 1 && (match(this, m_LogicalAnd(m_VPValue(Op0), m_VPValue(Op1))) || match(this, m_LogicalOr(m_VPValue(Op0), m_VPValue(Op1))))) { @@ -3111,6 +3110,62 @@ bool VPReplicateRecipe::shouldPack() const { }); } +/// Returns true if \p Ptr is a pointer computation for which the legacy cost +/// model computes a SCEV expression when computing the address cost. +static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) { + auto *PtrR = Ptr->getDefiningRecipe(); + if (!PtrR || !((isa<VPReplicateRecipe>(PtrR) && + cast<VPReplicateRecipe>(PtrR)->getOpcode() == + Instruction::GetElementPtr) || + isa<VPWidenGEPRecipe>(PtrR) || + match(Ptr, m_GetElementPtr(m_VPValue(), m_VPValue())))) + return false; + + // We are looking for a GEP where all indices are either loop invariant or + // inductions. + for (VPValue *Opd : drop_begin(PtrR->operands())) { + if (!Opd->isDefinedOutsideLoopRegions() && + !isa<VPScalarIVStepsRecipe, VPWidenIntOrFpInductionRecipe>(Opd)) + return false; + } + + return true; +} + +/// Returns true if \p V is used as part of the address of another load or +/// store. 
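+/// The walk uses an explicit worklist over the users of \p V, so transitive
+/// uses (e.g. a load whose result feeds a GEP that computes another load's
+/// address) are detected as well; a Seen set prevents reprocessing recipes
+/// reachable along multiple paths.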
+static bool isUsedByLoadStoreAddress(const VPUser *V) { + SmallPtrSet<const VPUser *, 4> Seen; + SmallVector<const VPUser *> WorkList = {V}; + + while (!WorkList.empty()) { + auto *Cur = dyn_cast<VPSingleDefRecipe>(WorkList.pop_back_val()); + if (!Cur || !Seen.insert(Cur).second) + continue; + + for (VPUser *U : Cur->users()) { + if (auto *InterleaveR = dyn_cast<VPInterleaveBase>(U)) + if (InterleaveR->getAddr() == Cur) + return true; + if (auto *RepR = dyn_cast<VPReplicateRecipe>(U)) { + if (RepR->getOpcode() == Instruction::Load && + RepR->getOperand(0) == Cur) + return true; + if (RepR->getOpcode() == Instruction::Store && + RepR->getOperand(1) == Cur) + return true; + } + if (auto *MemR = dyn_cast<VPWidenMemoryRecipe>(U)) { + if (MemR->getAddr() == Cur && MemR->isConsecutive()) + return true; + } + } + + append_range(WorkList, cast<VPSingleDefRecipe>(Cur)->users()); + } + return false; +} + InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, VPCostContext &Ctx) const { Instruction *UI = cast<Instruction>(getUnderlyingValue()); @@ -3218,21 +3273,58 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, } case Instruction::Load: case Instruction::Store: { - if (isSingleScalar()) { - bool IsLoad = UI->getOpcode() == Instruction::Load; - Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0)); - Type *ScalarPtrTy = Ctx.Types.inferScalarType(getOperand(IsLoad ? 0 : 1)); - const Align Alignment = getLoadStoreAlignment(UI); - unsigned AS = getLoadStoreAddressSpace(UI); - TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); - InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( - UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo, UI); - return ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( - ScalarPtrTy, nullptr, nullptr, Ctx.CostKind); - } + if (VF.isScalable() && !isSingleScalar()) + return InstructionCost::getInvalid(); + // TODO: See getMemInstScalarizationCost for how to handle replicating and // predicated cases. - break; + const VPRegionBlock *ParentRegion = getParent()->getParent(); + if (ParentRegion && ParentRegion->isReplicator()) + break; + + bool IsLoad = UI->getOpcode() == Instruction::Load; + const VPValue *PtrOp = getOperand(!IsLoad); + // TODO: Handle cases where we need to pass a SCEV to + // getAddressComputationCost. + if (shouldUseAddressAccessSCEV(PtrOp)) + break; + + Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0)); + Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp); + const Align Alignment = getLoadStoreAlignment(UI); + unsigned AS = getLoadStoreAddressSpace(UI); + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); + InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( + UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo); + + Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF); + + InstructionCost ScalarCost = + ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( + PtrTy, &Ctx.SE, nullptr, Ctx.CostKind); + if (isSingleScalar()) + return ScalarCost; + + SmallVector<const VPValue *> OpsToScalarize; + Type *ResultTy = Type::getVoidTy(PtrTy->getContext()); + // Set ResultTy and OpsToScalarize, if scalarization is needed. Currently we + // don't assign scalarization overhead in general, if the target prefers + // vectorized addressing or the loaded value is used as part of an address + // of another load or store. 
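+  // Illustrative example for the cost returned below (numbers invented): a
+  // replicated load at VF = 4 with a scalar memory-plus-address cost of 2 has
+  // a base cost of 4 * 2 = 8; getScalarizationOverhead then adds the cost of
+  // assembling the vector result (ResultTy) and of extracting any scalarized
+  // operands (OpsToScalarize), when those are set.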
+ bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing(); + if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) { + bool EfficientVectorLoadStore = + Ctx.TTI.supportsEfficientVectorElementLoadStore(); + if (!(IsLoad && !PreferVectorizedAddressing) && + !(!IsLoad && EfficientVectorLoadStore)) + append_range(OpsToScalarize, operands()); + + if (!EfficientVectorLoadStore) + ResultTy = Ctx.Types.inferScalarType(this); + } + + return (ScalarCost * VF.getFixedValue()) + + Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true); } } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index acdb379..ca63bf3 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -45,13 +45,13 @@ static cl::opt<bool> EnableWideActiveLaneMask( cl::desc("Enable use of wide get active lane mask instructions")); bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( - VPlanPtr &Plan, + VPlan &Plan, function_ref<const InductionDescriptor *(PHINode *)> GetIntOrFpInductionDescriptor, const TargetLibraryInfo &TLI) { ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT( - Plan->getVectorLoopRegion()); + Plan.getVectorLoopRegion()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) { // Skip blocks outside region if (!VPBB->getParent()) @@ -77,11 +77,11 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( for (VPValue *Op : PhiR->operands()) NewRecipe->addOperand(Op); } else { - VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue()); + VPValue *Start = Plan.getOrAddLiveIn(II->getStartValue()); VPValue *Step = - vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep()); + vputils::getOrCreateVPValueForSCEVExpr(Plan, II->getStep()); NewRecipe = new VPWidenIntOrFpInductionRecipe( - Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc()); + Phi, Start, Step, &Plan.getVF(), *II, Ingredient.getDebugLoc()); } } else { assert(isa<VPInstruction>(&Ingredient) && @@ -1110,8 +1110,7 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) { // x && !x -> 0 if (match(&R, m_LogicalAnd(m_VPValue(X), m_Not(m_Deferred(X))))) - return Def->replaceAllUsesWith(Plan->getOrAddLiveIn( - ConstantInt::getFalse(VPTypeAnalysis(*Plan).inferScalarType(Def)))); + return Def->replaceAllUsesWith(Plan->getFalse()); if (match(Def, m_Select(m_VPValue(), m_VPValue(X), m_Deferred(X)))) return Def->replaceAllUsesWith(X); @@ -3346,12 +3345,7 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) { VectorStep = Builder.createWidenCast(CastOp, VectorStep, IVTy); } - [[maybe_unused]] auto *ConstStep = - ScalarStep->isLiveIn() - ? dyn_cast<ConstantInt>(ScalarStep->getLiveInIRValue()) - : nullptr; - assert(!ConstStep || ConstStep->getValue() != 1); - (void)ConstStep; + assert(!match(ScalarStep, m_One()) && "Expected non-unit scalar-step"); if (TypeInfo.inferScalarType(ScalarStep) != IVTy) { ScalarStep = Builder.createWidenCast(Instruction::Trunc, ScalarStep, IVTy); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 4c65cb7..2f00e51 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -138,7 +138,7 @@ struct VPlanTransforms { /// widen recipes. Returns false if any VPInstructions could not be converted /// to a wide recipe if needed. 
 LLVM_ABI_FOR_TEST static bool tryToConvertVPInstructionsToVPRecipes(
-      VPlanPtr &Plan,
+      VPlan &Plan,
       function_ref<const InductionDescriptor *(PHINode *)>
           GetIntOrFpInductionDescriptor,
       const TargetLibraryInfo &TLI);
diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt
index 6f98eae..8399292 100644
--- a/llvm/runtimes/CMakeLists.txt
+++ b/llvm/runtimes/CMakeLists.txt
@@ -507,14 +507,10 @@ if(build_runtimes)
   endif()

   # Forward user-provided system configuration to runtimes for requirement introspection.
-  # CMAKE_PREFIX_PATH is the search path for CMake packages. In order to pass through
-  # the command line interface, the CMake semicolon separator needs to be replaced
-  # with $<SEMICOLON>
+  # CMAKE_PREFIX_PATH is the search path for CMake packages.
   if(CMAKE_PREFIX_PATH)
-    string(JOIN "$<SEMICOLON>" escaped_cmake_prefix_path ${CMAKE_PREFIX_PATH})
-    list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${escaped_cmake_prefix_path}")
+    list(APPEND extra_cmake_args "-DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH}")
   endif()
-
   # CMAKE_PROGRAM_PATH is the search path for executables such as python.
   if(CMAKE_PROGRAM_PATH)
     list(APPEND extra_cmake_args "-DCMAKE_PROGRAM_PATH=${CMAKE_PROGRAM_PATH}")
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
index a08f859..05aad8a 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/early-exit-runtime-checks.ll
@@ -756,3 +756,129 @@ e.1:
 e.2:
   ret void
 }
+
+define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context(ptr %A, ptr %B) nosync {
+; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_nofree_via_context'
+; CHECK-NEXT: loop.header:
+; CHECK-NEXT: Memory dependences are safe with run-time checks
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group GRP0:
+; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv
+; CHECK-NEXT: Against group GRP1:
+; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group GRP0:
+; CHECK-NEXT: (Low: %B High: (2000 + %B))
+; CHECK-NEXT: Member: {%B,+,4}<nuw><%loop.header>
+; CHECK-NEXT: Group GRP1:
+; CHECK-NEXT: (Low: %A High: (2000 + %A))
+; CHECK-NEXT: Member: {%A,+,4}<nuw><%loop.header>
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} + +define void @all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors(ptr %A, ptr %B, i1 %c) nosync { +; CHECK-LABEL: 'all_exits_dominate_latch_countable_exits_at_most_500_iterations_known_deref_via_assumption_missing_nofree_multiple_predecessors' +; CHECK-NEXT: loop.header: +; CHECK-NEXT: Memory dependences are safe with run-time checks +; CHECK-NEXT: Dependences: +; CHECK-NEXT: Run-time memory checks: +; CHECK-NEXT: Check 0: +; CHECK-NEXT: Comparing group GRP0: +; CHECK-NEXT: %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv +; CHECK-NEXT: Against group GRP1: +; CHECK-NEXT: %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv +; CHECK-NEXT: Grouped accesses: +; CHECK-NEXT: Group GRP0: +; CHECK-NEXT: (Low: %B High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%B,+,4}<nuw><%loop.header> +; CHECK-NEXT: Group GRP1: +; CHECK-NEXT: (Low: %A High: inttoptr (i64 -1 to ptr)) +; CHECK-NEXT: Member: {%A,+,4}<nuw><%loop.header> +; CHECK-EMPTY: +; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. +; CHECK-NEXT: SCEV assumptions: +; CHECK-EMPTY: +; CHECK-NEXT: Expressions re-written: +; +entry: + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %A, i64 2000) ] + call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %B, i64 2000) ] + br i1 %c, label %then, label %else + +then: + br label %loop.header + +else: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %latch ] + %gep.A = getelementptr inbounds i32, ptr %A, i64 %iv + %gep.B = getelementptr inbounds i32, ptr %B, i64 %iv + %l = load i32, ptr %gep.A, align 4 + store i32 0, ptr %gep.B, align 4 + %cntable.c.1 = icmp ult i64 %iv, 1000 + %iv.next = add nuw nsw i64 %iv, 1 + br i1 %cntable.c.1, label %b2, label %e.1 + +b2: + %uncntable.c.0 = icmp eq i32 %l, 0 + br i1 %uncntable.c.0, label %e.2, label %b3 + +b3: + %cntable.c.2 = icmp eq i64 %iv.next, 500 + br i1 %cntable.c.2, label %cleanup4, label %latch + +latch: + br label %loop.header + +cleanup4: + ret void + +e.1: + ret void + +e.2: + ret void +} diff --git a/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll b/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll new file mode 100644 index 0000000..e185530 --- /dev/null +++ b/llvm/test/Bitcode/DW_OP_rot_neg_abs.ll @@ -0,0 +1,10 @@ +;; This test checks the validity of DWARF operators DW_OP_rot, DW_OP_neg, and DW_OP_abs. 
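+;; As a sanity check on the expression (standard DWARF stack semantics): after
+;; DW_OP_push_object_address and the two DW_OP_lit0 pushes the stack is
+;; [addr, 0, 0]; DW_OP_neg and DW_OP_abs only touch the 0 on top, the three
+;; DW_OP_rot operations rotate the top three entries back to their original
+;; order, and the two DW_OP_plus operations fold the zeros away, yielding the
+;; object address unchanged.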
+ +; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s + +; CHECK: !DIExpression(DW_OP_push_object_address, DW_OP_lit0, DW_OP_lit0, DW_OP_neg, DW_OP_abs, DW_OP_rot, DW_OP_rot, DW_OP_rot, DW_OP_plus, DW_OP_plus) + +; ModuleID = 'DW_OP_rot_neg_abs.adb' +source_filename = "/dir/DW_OP_rot_neg_abs.ll" + +!named = !{!DIExpression(DW_OP_push_object_address, DW_OP_lit0, DW_OP_lit0, DW_OP_neg, DW_OP_abs, DW_OP_rot, DW_OP_rot, DW_OP_rot, DW_OP_plus, DW_OP_plus)} diff --git a/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir new file mode 100644 index 0000000..35eafe8 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir @@ -0,0 +1,587 @@ +# RUN: llc -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -mtriple=aarch64-none-linux-gnu -run-pass=prologepilog %s -o - | FileCheck %s +# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -o - | FileCheck %s --check-prefix=ASM +# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -filetype=obj -o %t +# RUN: llvm-objdump --dwarf=frames %t | FileCheck %s --check-prefix=UNWINDINFO +# RUN: rm -rf %t +# +# Test allocation and deallocation of SVE objects on the stack with +# split-sve-objects (and hazard padding) enabled. This also tests using a +# combination of scalable and non-scalable offsets to access the SVE on the +# stack. +# +# With split-sve-objects (which implies hazard padding) the SVE area is split +# into PPR and ZPR areas with (fixed-size) hazard padding between them. The PPR +# area holds all scalable predicate callee saves and locals, and the ZPR area +# holds all scalable vector callee saves and locals. Additionally, any FPR +# callee save is promoted to a ZPR callee save (to avoid needing additional +# hazard padding in the callee save area). +# +# +-------------+ +# | stack arg | +# +-------------+ <- SP before call +# | Callee Saves| +# | Frame record| (if available) +# |-------------| <- FP (if available) +# | PPR area | +# |-------------| +# |/////////////| hazard padding +# |-------------| +# | ZPR area | +# +-------------+ +# | : | +# | Stack objs | +# | : | +# +-------------+ <- SP after call and frame-setup +# +--- | + + define void @test_allocate_split_sve() uwtable { entry: unreachable } + define void @test_allocate_split_sve_realigned() uwtable { entry: unreachable } + define void @test_address_split_sve() uwtable { entry: unreachable } + define void @test_address_split_sve_fp() uwtable { entry: unreachable } + define aarch64_sve_vector_pcs void @save_restore_ppr_zpr() uwtable { entry: unreachable } + +... +--- +# +----------+ +# |scratchreg| // x29 is used as scratch reg. 
+# |----------| +# | %stack.0 | // scalable predicate of n * 12 bytes, aligned to 16 bytes +# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes) +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# +----------+ +# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes, +# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes) +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.1 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_allocate_split_sve +# CHECK: stackSize: 1056 + +# CHECK: bb.0.entry: +# CHECK: liveins: $z0, $p0, $fp +# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.4) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 +# +# CHECK-NEXT: $x8 = ADDXri $sp, 1040, 0 +# CHECK-NEXT: $x8 = ADDPL_XXI $x8, 7, implicit $vg +# CHECK-NEXT: STR_ZXI $z0, killed $x8, 0 :: (store (<vscale x 1 x s128>) into %stack.0) +# CHECK-NEXT: $x8 = ADDXri $sp, 2064, 0 +# CHECK-NEXT: STR_PXI $p0, killed $x8, 18 :: (store (<vscale x 1 x s16>) into %stack.1) +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_allocate_split_sve: +# ASM: str x29, [sp, #-16]! 
+# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 1056 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056 +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_allocate_split_sve +stack: + - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } + - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 } + - { id: 2, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $p0 + STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0) + STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1) + RET_ReallyLR +... +--- + +# Stack realignment is not supported with split-sve-objects, so we fallback to +# the default hazard padding implementation. This does not prevent hazards +# between ZPRs and PPRs (TODO: support this case). 
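+# In the fallback layout sketched below, the PPR and ZPR locals end up
+# adjacent in a single SVE area, with hazard padding only above and below it;
+# this is exactly the ZPR/PPR hazard noted above.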
+# +# +----------+ +# | lr, fp | // frame record +# |----------| +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.0 | // scalable predicate of n * 12 bytes, aligned to 16 bytes +# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes) +# +----------+ +# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes, +# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes) +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.1 | // not scalable +# +----------+ <- SP + +name: test_allocate_split_sve_realigned +stack: + - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 } + - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 } + - { id: 2, stack-id: default, size: 16, alignment: 32 } +body: | + bb.0.entry: + liveins: $z0, $p0 + STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0) + STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1) + RET_ReallyLR + +# CHECK-LABEL: name: test_allocate_split_sve_realigned +# CHECK: stackSize: 2080 + +# CHECK: bb.0.entry: +# CHECK: liveins: $z0, $p0, $lr +# CHECK: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup STRXui killed $lr, $sp, 129 :: (store (s64) into %stack.4) +# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -2, implicit $vg +# CHECK-NEXT: $sp = frame-setup ANDXri killed $x9, 7930 +# +# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0 +# CHECK-NEXT: $x8 = ADDPL_XXI $x8, -1, implicit $vg +# CHECK-NEXT: STR_ZXI $z0, killed $x8, -1 :: (store (<vscale x 1 x s128>) into %stack.0) +# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_PXI $p0, killed $x8, -15 :: (store (<vscale x 1 x s16>) into %stack.1) +# +# CHECK-NEXT: $sp = frame-destroy SUBXri $fp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1040 +# CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 129 :: (load (s64) from %stack.4) +# CHECK-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.5) +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_allocate_split_sve_realigned +# ASM: sub sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: str x29, [sp, #1024] +# ASM-NEXT: str x30, [sp, #1032] +# ASM-NEXT: add x29, sp, #1024 +# ASM-NEXT: .cfi_def_cfa w29, 16 +# ASM-NEXT: .cfi_offset w30, -8 +# ASM-NEXT: .cfi_offset w29, -16 +# +# ASM: sub sp, x29, #1024 +# ASM-NEXT: .cfi_def_cfa wsp, 1040 +# ASM-NEXT: ldr x30, [sp, #1032] +# ASM-NEXT: ldr x29, [sp, #1024] +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w30 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa: reg29 +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1040 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 
+# UNWINDINFO-NEXT: DW_CFA_restore: reg30 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 +... +--- + +# +----------+ +# |scratchreg| // x29 is used as scratch reg. +# +----------+ +# | %stack.2 | // scalable predicate @ SP + 2064b + 46 scalable bytes +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# |----------| +# | %stack.0 | // scalable vector @ SP + 1040b + 16 scalable bytes +# | %stack.1 | // scalable vector @ SP + 1040b +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.3 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_address_split_sve +# CHECK: stackSize: 1056 + +# CHECK: bb.0.entry: +# CHECK-NEXT: liveins: +# CHECK-NEXT: {{ $}} +# CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 +# +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0 +# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 1 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0 +# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], 0 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 2064, 0 +# CHECK-NEXT: STR_PXI $p0, killed $[[TMP]], 23 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056 +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.5) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_address_split_sve +# ASM: str x29, [sp, #-16]! 
+# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_def_cfa_offset 1040 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 1056 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +1040 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056 +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_address_split_sve +frameInfo: + maxAlignment: 16 +stack: + - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 } + - { id: 3, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $z1, $p0 + + STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0) + STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1) + STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2) + + RET_ReallyLR +... 
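+# A quick decode of the addressing checked above (STR_PXI immediates scale by
+# the predicate length, 2 * vscale bytes; STR_ZXI immediates by the vector
+# length, 16 * vscale bytes): the predicate store uses a base of SP + 2064 and
+# immediate 23, i.e. SP + 2064 + 46 scalable bytes, while the first vector
+# store uses base SP + 1040 and immediate 1, i.e. SP + 1040 + 16 scalable
+# bytes, matching the layout diagram.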
+--- +# +----------+ +# | lr, fp | // frame record +# +----------+ <- FP +# | %stack.2 | // scalable predicate @ FP - 2 scalable bytes +# |----------| +# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area +# |//////////| // Note: This is currently not included in the "stackSize" +# |----------| +# | %stack.0 | // scalable vector @ FP - 1024b - 32 scalable bytes +# | %stack.1 | // scalable vector @ FP - 1024b - 48 scalable bytes +# +----------+ +# |//////////| // hazard padding (1024 bytes) +# |----------| +# | %stack.3 | // not scalable +# +----------+ <- SP + +# CHECK-LABEL: name: test_address_split_sve_fp +# CHECK: stackSize: 1056 +# +# CHECK: bb.0.entry: +# CHECK-NEXT: liveins: +# CHECK-NEXT: {{ $}} +# CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 :: (store (s64) into %stack.6), (store (s64) into %stack.5) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg +# +# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], -2 +# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0 +# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], -3 +# CHECK-NEXT: STR_PXI $p0, $fp, -1 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 +# CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.6), (load (s64) from %stack.5) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: test_address_split_sve_fp +# ASM: stp x29, x30, [sp, #-16]! 
+# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: mov x29, sp +# ASM-NEXT: .cfi_def_cfa w29, 16 +# ASM-NEXT: .cfi_offset w30, -8 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: sub sp, sp, #1040 +# ASM-NEXT: addvl sp, sp, #-2 +# +# ASM: addvl sp, sp, #2 +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: addvl sp, sp, #1 +# ASM-NEXT: add sp, sp, #1040 +# ASM-NEXT: .cfi_def_cfa wsp, 16 +# ASM-NEXT: ldp x29, x30, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w30 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO: DW_CFA_def_cfa: reg29 +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# +# UNWINDINFO: DW_CFA_def_cfa: reg31 +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg30 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: test_address_split_sve_fp +frameInfo: + maxAlignment: 16 + isFrameAddressTaken: true +stack: + - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 } + - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 } + - { id: 3, stack-id: default, size: 16, alignment: 8 } +body: | + bb.0.entry: + liveins: $z0, $z1, $p0 + + STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0) + STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1) + STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2) + + RET_ReallyLR +... +--- +# CHECK-LABEL: name: save_restore_ppr_zpr +# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.8) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16 +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: frame-setup STR_PXI killed $p6, $sp, 5 :: (store (s16) into %stack.7) +# CHECK-NEXT: frame-setup STR_PXI killed $p5, $sp, 6 :: (store (s16) into %stack.6) +# CHECK-NEXT: frame-setup STR_PXI killed $p4, $sp, 7 :: (store (s16) into %stack.5) +# +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# +# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3, implicit $vg +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 :: (store (s128) into %stack.4) +# CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 :: (store (s128) into %stack.3) +# CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 :: (store (s128) into %stack.2) +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 +# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1056, 0 +# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# +# +# CHECK: $sp = frame-destroy ADDXri $sp, 1056, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION 
escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.4) +# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.3) +# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.2) +# +# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 +# +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10 +# CHECK-NEXT: $p6 = frame-destroy LDR_PXI $sp, 5 :: (load (s16) from %stack.7) +# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 6 :: (load (s16) from %stack.6) +# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 :: (load (s16) from %stack.5) +# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16 +# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.8) +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0 +# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29 +# CHECK-NEXT: RET_ReallyLR + +# ASM-LABEL: save_restore_ppr_zpr: +# ASM: str x29, [sp, #-16]! +# ASM-NEXT: .cfi_def_cfa_offset 16 +# ASM-NEXT: .cfi_offset w29, -16 +# ASM-NEXT: addvl sp, sp, #-1 +# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM-NEXT: str p6, [sp, #5, mul vl] +# ASM-NEXT: str p5, [sp, #6, mul vl] +# ASM-NEXT: str p4, [sp, #7, mul vl] +# ASM-NEXT: sub sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG +# ASM-NEXT: addvl sp, sp, #-3 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG +# ASM-NEXT: str z10, [sp] +# ASM-NEXT: str z9, [sp, #1, mul vl] +# ASM-NEXT: str z8, [sp, #2, mul vl] +# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 24 * VG - 1040 +# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 32 * VG - 1040 +# ASM-NEXT: sub sp, sp, #1056 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 2096 + 32 * VG +# +# ASM: add sp, sp, #1056 +# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG +# ASM-NEXT: ldr z10, [sp] +# ASM-NEXT: ldr z9, [sp, #1, mul vl] +# ASM-NEXT: ldr z8, [sp, #2, mul vl] +# ASM-NEXT: add sp, sp, #1024 +# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG +# ASM-NEXT: addvl sp, sp, #3 +# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG +# ASM-NEXT: .cfi_restore z8 +# ASM-NEXT: .cfi_restore z9 +# ASM-NEXT: .cfi_restore z10 +# ASM-NEXT: ldr p6, [sp, #5, mul vl] +# ASM-NEXT: ldr p5, [sp, #6, mul vl] +# ASM-NEXT: ldr p4, [sp, #7, mul vl] +# ASM-NEXT: 
addvl sp, sp, #1 +# ASM-NEXT: .cfi_def_cfa wsp, 16 +# ASM-NEXT: ldr x29, [sp], #16 +# ASM-NEXT: .cfi_def_cfa_offset 0 +# ASM-NEXT: .cfi_restore w29 + +# UNWINDINFO: DW_CFA_def_cfa_offset: +16 +# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16 +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2096, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus +# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104 +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105 +# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106 +# UNWINDINFO: DW_CFA_def_cfa: reg31 +16 +# UNWINDINFO: DW_CFA_def_cfa_offset: +0 +# UNWINDINFO-NEXT: DW_CFA_restore: reg29 + +name: save_restore_ppr_zpr +stack: + - { id: 0, stack-id: default, size: 32, alignment: 16 } +body: | + bb.0.entry: + + $p4 = IMPLICIT_DEF + $p5 = IMPLICIT_DEF + $p6 = IMPLICIT_DEF + $z8 = IMPLICIT_DEF + $z9 = IMPLICIT_DEF + $z10 = IMPLICIT_DEF + + RET_ReallyLR diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir index bff0cac..0298168 100644 --- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir +++ b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir @@ -983,26 +983,22 @@ body: | ; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved ; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4 ; EXPAND-NEXT: {{ $}} - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0 - ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.3) + ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2) ; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.2) + ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1) ; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0 - ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.1) - ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0 + ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0) ; ; EXPAND-NEXT: $p8 = IMPLICIT_DEF ; - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0 - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.2) + ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1) ; EXPAND-NEXT: 
$p4 = frame-destroy PTRUE_B 31, implicit $vg ; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv - ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.1) + ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0) ; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv ; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg - ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.3) - ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0 + ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2) ; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3 ; If we spill a register above p8, p4 must also be saved, so we can guarantee diff --git a/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll new file mode 100644 index 0000000..690a39d --- /dev/null +++ b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll @@ -0,0 +1,824 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 | FileCheck %s +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT + +; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 + +; <GPRs> +; %ppr_local sp+2048+30*vscale (= #15, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding sp+2048+16*vscale +; <hazard padding> sp+1024+16*vscale +; %zpr_local sp+1024 +; <hazard padding> +; -> sp +define void @zpr_and_ppr_local(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: zpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2048 +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: add x8, sp, #1024 +; CHECK-NEXT: str z0, [x8] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %zpr_local = alloca <vscale x 16 x i8> + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile <vscale x 16 x i8> %vector, ptr %zpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 + +; <GPRs> +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding fp-16*vscale +; <hazard padding> fp-1024-16*vscale +; %zpr_local fp-1024-32*vscale (= #-2, mul vl for str/ldr ZPR) +; <hazard padding> +; -> sp +define void @zpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: zpr_and_ppr_local_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: sub x8, x29, #1024 +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str z0, [x8, #-2, mul vl] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %zpr_local = alloca <vscale x 16 x i8> + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile <vscale x 16 x i8> %vector, ptr %zpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024 + +; <GPRs> +; %ppr_local sp+2064+14*vscale (= #7, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding sp+2064 +; <hazard padding> sp+1040 +; %fpr_local sp+1032 +; 8 bytes of padding sp+1024 +; <hazard padding> +; -> sp +define void @fpr_and_ppr_local(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: fpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2064 +; CHECK-NEXT: str p0, [x8, #7, mul vl] +; CHECK-NEXT: str d0, [sp, #1032] +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %fpr_local = alloca double + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile double %double, ptr %fpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024 + +; <GPRs> +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; <hazard padding> +; %fpr_local sp+1032 +; 8 bytes of padding sp+1024 +; <hazard padding> +; -> sp +define void @fpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: fpr_and_ppr_local_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str d0, [sp, #1032] +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %fpr_local = alloca double + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile double %double, ptr %fpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8 + +; <CS GPRs> +; %ppr_local sp+2064+30*vscale (= #15, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; <hazard padding> sp+1040+16*vscale +; <fpr callee save: z8> sp+1040 +; <hazard padding> sp+16 +; %gpr_local sp+8 +; 8 bytes of padding +; -> sp +define void @gpr_and_ppr_local(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: gpr_and_ppr_local: +; CHECK: // %bb.0: +; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2080 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +; CHECK-NEXT: add x8, sp, #2064 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: str x0, [sp, #8] +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed + %ppr_local = alloca <vscale x 16 x i1> + %gpr_local = alloca i64 + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile i64 %int, ptr %gpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8 + +; <CS GPRs> +; -> fp +; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR) +; 14 x vscale bytes of padding +; <hazard padding> +; <fpr callee save: z8> +; <hazard padding> +; %gpr_local sp+8 +; 8 bytes of padding +; -> sp +define void @gpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" "frame-pointer"="all" { +; CHECK-LABEL: gpr_and_ppr_local_fp: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1040 +; CHECK-NEXT: .cfi_def_cfa w29, 16 +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: str p0, [x29, #-1, mul vl] +; CHECK-NEXT: str x0, [sp, #8] +; CHECK-NEXT: add sp, sp, #1040 +; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed + %ppr_local = alloca <vscale x 16 x i1> + %gpr_local = alloca i64 + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile i64 %int, ptr %gpr_local + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; 
CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-320 x vscale], Type: Variable, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-320 x vscale], Type: Variable, Align: 16, Size: 1024 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2088-320 x vscale], Type: Variable, Align: 8, Size: 8 + +; <CS GPRs> +; <CS PPRs> +; %ppr_local sp+2080+286*vscale (addvl #17, addpl #7) +; 14 * vscale bytes of padding sp+2080+272*vscale +; <hazard padding> sp+1056+272*vscale +; <CS ZPRs> sp+1056+16*vscale +; %zpr_local sp+1056 +; %fpr_local sp+1048 +; 8 bytes of padding sp+1040 +; <hazard padding> sp+16 +; %gpr_local sp+8 +; 8 bytes of padding sp +; -> sp +define void @all_stack_areas(<vscale x 16 x i1> %pred, double %fp) { +; CHECK-LABEL: all_stack_areas: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-17 +; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 
0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 2096 + 160 * VG +; CHECK-NEXT: .cfi_offset w30, -8 +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1040 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1040 +; CHECK-NEXT: add x0, sp, #2080 +; CHECK-NEXT: add x8, sp, #2080 +; CHECK-NEXT: add x1, sp, #1056 +; CHECK-NEXT: addvl x0, x0, #17 +; CHECK-NEXT: add x2, sp, #1048 +; CHECK-NEXT: add x3, sp, #8 +; CHECK-NEXT: addpl x0, x0, #7 +; CHECK-NEXT: str d0, [sp, #1048] +; CHECK-NEXT: str p0, [x8, #143, mul vl] +; CHECK-NEXT: bl foo +; CHECK-NEXT: add sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #17 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] 
// 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %zpr_local = alloca <vscale x 16 x i8> + %fpr_local = alloca double + ; // Needed to sort %fpr_local into the FPR region + store double %fp, ptr %fpr_local + ; // Needed to sort %ppr_local into the PPR region + store <vscale x 16 x i1> %pred, ptr %ppr_local + %gpr_local = alloca i64 + call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local) + ret void +} +declare void @foo(ptr, ptr, ptr, ptr) + +; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas_fp +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; 
CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1064-320 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2096-320 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2104-320 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; -> fp
+; <CS PPRs> fp-32*vscale
+; %ppr_local fp-34*vscale (addpl #-17)
+; 14 * vscale bytes of padding fp-48*vscale
+; <hazard padding> fp-1024-48*vscale
+; <CS ZPRs> fp-1024-304*vscale
+; %zpr_local fp-1024-320*vscale (addvl #-20)
+; %fpr_local sp+1048
+; 8 bytes of padding sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding sp
+; -> sp
+define void @all_stack_areas_fp(<vscale x 16 x i1> %pred, double %fp) "frame-pointer"="all" {
+; CHECK-LABEL: all_stack_areas_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x28, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w28, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10,
0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1056 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1056 +; CHECK-NEXT: sub x1, x29, #1024 +; CHECK-NEXT: addpl x0, x29, #-17 +; CHECK-NEXT: add x2, sp, #1048 +; CHECK-NEXT: addvl x1, x1, #-20 +; CHECK-NEXT: add x3, sp, #8 +; CHECK-NEXT: str d0, [sp, #1048] +; CHECK-NEXT: str p0, [x29, #-17, mul vl] +; CHECK-NEXT: bl foo +; CHECK-NEXT: add sp, sp, #1056 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #17 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret + %ppr_local = alloca <vscale x 16 x i1> + %zpr_local = alloca <vscale x 16 x i8> + %fpr_local = alloca double + ; // Needed to sort %fpr_local into the FPR region + store double %fp, 
ptr %fpr_local + ; // Needed to sort %ppr_local into the PPR region + store <vscale x 16 x i1> %pred, ptr %ppr_local + %gpr_local = alloca i64 + call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local) + ret void +} + +; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 16, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-288 x vscale], Type: Spill, Align: 
16, Size: vscale x 16 +; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2112-288 x vscale], Type: Variable, Align: 16, Size: 1024 + +define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" { +; CHECK-LABEL: svecc_call: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: cntd x9 +; CHECK-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 64 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w26, -16 +; CHECK-NEXT: .cfi_offset w27, -24 +; CHECK-NEXT: .cfi_offset w28, -32 +; CHECK-NEXT: .cfi_offset vg, -48 +; CHECK-NEXT: .cfi_offset w30, -56 +; CHECK-NEXT: .cfi_offset w29, -64 +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-16 +; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 
0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088 +; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088 +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: mov x8, x0 +; CHECK-NEXT: bl __arm_sme_state +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: //APP +; CHECK-NEXT: //NO_APP +; CHECK-NEXT: tbz w19, #0, .LBB8_2 +; CHECK-NEXT: // %bb.1: // %entry +; CHECK-NEXT: smstop sm +; CHECK-NEXT: .LBB8_2: // %entry +; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: mov w1, #45 // =0x2d +; CHECK-NEXT: mov w2, #37 // =0x25 +; CHECK-NEXT: bl memset +; CHECK-NEXT: tbz w19, #0, .LBB8_4 +; CHECK-NEXT: // %bb.3: // %entry +; CHECK-NEXT: smstart sm +; CHECK-NEXT: .LBB8_4: // %entry +; CHECK-NEXT: mov w0, #22647 // =0x5877 +; CHECK-NEXT: movk w0, #59491, lsl #16 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #16 +; CHECK-NEXT: .cfi_restore z8 +; CHECK-NEXT: .cfi_restore z9 +; CHECK-NEXT: .cfi_restore z10 +; CHECK-NEXT: .cfi_restore z11 +; CHECK-NEXT: .cfi_restore z12 +; CHECK-NEXT: .cfi_restore z13 +; CHECK-NEXT: .cfi_restore z14 +; CHECK-NEXT: .cfi_restore z15 +; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 +; CHECK-NEXT: .cfi_def_cfa wsp, 64 +; CHECK-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK-NEXT: ldp x28, x27, [sp, #32] // 
16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w19
+; CHECK-NEXT: .cfi_restore w26
+; CHECK-NEXT: .cfi_restore w27
+; CHECK-NEXT: .cfi_restore w28
+; CHECK-NEXT: .cfi_restore vg
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: ret
+entry:
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
+ ret i32 -396142473
+}
+declare ptr @memset(ptr, i32, i32)
+
+; FIXME: aarch64-split-sve-objects is currently not supported in this function
+; as it requires stack realignment (for the 32-byte aligned alloca).
+; GPR CSRs
+; <hazard padding>
+; FPR CSRs
+; <hazard padding>
+; <SVE locals (PPRs and ZPRs)> <--- hazard between PPRs and ZPRs here!
+; <realignment padding>
+; -> sp
+define void @zpr_and_ppr_local_realignment(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: zpr_and_ppr_local_realignment:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: sub x9, sp, #1040
+; CHECK-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK-NEXT: add x29, sp, #1024
+; CHECK-NEXT: addvl x9, x9, #-2
+; CHECK-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: sub x8, x29, #1024
+; CHECK-NEXT: str p0, [x8, #-1, mul vl]
+; CHECK-NEXT: str z0, [x8, #-2, mul vl]
+; CHECK-NEXT: str x0, [sp]
+; CHECK-NEXT: sub sp, x29, #1024
+; CHECK-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %gpr_local = alloca i64, align 32
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ store volatile i64 %gpr, ptr %gpr_local
+ ret void
+}
+
+define void @zpr_and_ppr_local_stack_probing(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr)
+; CHECK-LABEL: zpr_and_ppr_local_stack_probing:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]!
// 8-byte Folded Spill +; CHECK-NEXT: sub sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str xzr, [sp] +; CHECK-NEXT: sub sp, sp, #1824 +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str xzr, [sp] +; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xb0, 0x16, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2864 + 16 * VG +; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: add x8, sp, #2848 +; CHECK-NEXT: str p0, [x8, #15, mul vl] +; CHECK-NEXT: add x8, sp, #1824 +; CHECK-NEXT: str z0, [x8] +; CHECK-NEXT: str x0, [sp] +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1024 +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: add sp, sp, #1824 +; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: ret + "probe-stack"="inline-asm" "stack-probe-size"="4096" "frame-pointer"="none" "aarch64_pstate_sm_compatible" +{ + %ppr_local = alloca <vscale x 16 x i1> + %zpr_local = alloca <vscale x 16 x i8> + %gpr_local = alloca i64, i64 100, align 8 + store volatile <vscale x 16 x i1> %pred, ptr %ppr_local + store volatile <vscale x 16 x i8> %vector, ptr %zpr_local + store volatile i64 %gpr, ptr %gpr_local + ret void +} diff --git a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll index 1de8d0a..01e3d3a 100644 --- a/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll +++ b/llvm/test/CodeGen/AArch64/ssve-stack-hazard-remarks.ll @@ -68,13 +68,12 @@ entry: } ; SVE calling conventions -; Predicate register spills end up in FP region, currently. This can be -; mitigated with the -aarch64-enable-zpr-predicate-spills option. +; Padding is placed between predicate and fpr/zpr register spills, so only emit remarks when hazard padding is off. +; Note: The -aarch64-enable-zpr-predicate-spills option is deprecated (and will be removed soon). 
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 { ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] -; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_call': ; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object ; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] @@ -89,7 +88,6 @@ entry: define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) #2 { ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-64-258 * vscale] is too close to FPR stack object at [SP-64-256 * vscale] ; CHECK: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] -; CHECK-PADDING: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': PPR stack object at [SP-1088-258 * vscale] is too close to FPR stack object at [SP-1088-256 * vscale] ; CHECK-PADDING-NOT: remark: <unknown>:0:0: stack hazard in 'svecc_alloca_call': ; CHECK-ZPR-PRED-SPILLS-NOT: <unknown>:0:0: stack hazard in 'svecc_call': PPR stack object at {{.*}} is too close to FPR stack object ; CHECK-ZPR-PRED-SPILLS: <unknown>:0:0: stack hazard in 'svecc_alloca_call': FPR stack object at [SP-64-16 * vscale] is too close to GPR stack object at [SP-64] diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll index 5f52280..bdee359 100644 --- a/llvm/test/CodeGen/AArch64/stack-hazard.ll +++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0 ; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64 -; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024 +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects=false -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE +; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE define i32 @basic(i32 noundef %num) { ; CHECK-LABEL: basic: @@ -1503,72 +1504,24 @@ define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1> } define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1_caller([2 x <vscale x 4 x i1>] %arg1, [2 x <vscale x 4 x i1>] %arg2) nounwind "aarch64_pstate_sm_compatible" { -; CHECK0-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK0: // %bb.0: -; CHECK0-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill -; CHECK0-NEXT: addvl sp, sp, #-1 -; CHECK0-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK0-NEXT: mov p5.b, p0.b -; CHECK0-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK0-NEXT: mov p4.b, p1.b -; CHECK0-NEXT: mov p0.b, p2.b -; CHECK0-NEXT: mov p1.b, p3.b -; CHECK0-NEXT: mov p2.b, p5.b -; CHECK0-NEXT: mov p3.b, p4.b -; CHECK0-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK0-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK0-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK0-NEXT: addvl sp, sp, #1 -; CHECK0-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK0-NEXT: ret -; -; CHECK64-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK64: // %bb.0: -; CHECK64-NEXT: sub sp, sp, #80 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: addvl sp, sp, #-1 -; CHECK64-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: sub sp, sp, #64 -; CHECK64-NEXT: mov p4.b, p1.b -; CHECK64-NEXT: mov p5.b, p0.b -; CHECK64-NEXT: mov p0.b, p2.b -; CHECK64-NEXT: mov p1.b, p3.b -; CHECK64-NEXT: mov p2.b, p5.b -; CHECK64-NEXT: mov p3.b, p4.b -; CHECK64-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #1 -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #80 -; CHECK64-NEXT: ret -; -; CHECK1024-LABEL: sve_signature_pred_2xv4i1_caller: -; CHECK1024: // %bb.0: -; CHECK1024-NEXT: sub sp, sp, #1040 -; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill -; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill -; CHECK1024-NEXT: addvl sp, sp, #-1 -; CHECK1024-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: mov p4.b, p1.b -; CHECK1024-NEXT: mov p5.b, p0.b -; CHECK1024-NEXT: mov p0.b, p2.b -; CHECK1024-NEXT: mov p1.b, p3.b -; CHECK1024-NEXT: mov p2.b, p5.b -; CHECK1024-NEXT: mov p3.b, p4.b -; CHECK1024-NEXT: bl sve_signature_pred_2xv4i1 -; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: addvl sp, sp, #1 -; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload -; CHECK1024-NEXT: add sp, sp, #1040 -; CHECK1024-NEXT: ret +; CHECK-LABEL: sve_signature_pred_2xv4i1_caller: +; CHECK: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-1 +; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: mov p5.b, p0.b +; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK-NEXT: mov p4.b, p1.b +; CHECK-NEXT: mov p0.b, p2.b +; CHECK-NEXT: mov p1.b, p3.b +; CHECK-NEXT: mov p2.b, p5.b +; CHECK-NEXT: mov p3.b, p4.b +; CHECK-NEXT: bl sve_signature_pred_2xv4i1 +; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #1 +; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload +; CHECK-NEXT: ret %res = call [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>] %arg2, [2 x <vscale x 4 x i1>] %arg1) ret [2 x <vscale x 4 x i1>] %res } @@ -1987,23 +1940,22 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; ; CHECK64-LABEL: svecc_call: ; CHECK64: // %bb.0: // %entry -; CHECK64-NEXT: sub sp, sp, #128 -; CHECK64-NEXT: .cfi_def_cfa_offset 128 +; CHECK64-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_def_cfa_offset 64 ; CHECK64-NEXT: cntd x9 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill -; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill -; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill -; CHECK64-NEXT: add x29, sp, #64 +; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK64-NEXT: mov x29, sp ; CHECK64-NEXT: .cfi_def_cfa w29, 64 -; CHECK64-NEXT: .cfi_offset w19, -16 -; CHECK64-NEXT: .cfi_offset w26, -24 -; CHECK64-NEXT: .cfi_offset w27, -32 -; CHECK64-NEXT: .cfi_offset w28, -40 +; CHECK64-NEXT: .cfi_offset w19, -8 +; CHECK64-NEXT: .cfi_offset w26, -16 +; CHECK64-NEXT: .cfi_offset w27, -24 +; CHECK64-NEXT: .cfi_offset w28, -32 ; CHECK64-NEXT: .cfi_offset vg, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: addvl sp, sp, #-18 +; CHECK64-NEXT: addvl sp, sp, #-2 ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2016,30 +1968,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z11, [sp, #14, mul vl] 
// 16-byte Folded Spill -; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: sub sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #-16 +; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 
0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128 ; CHECK64-NEXT: sub sp, sp, #64 ; CHECK64-NEXT: mov x8, x0 ; CHECK64-NEXT: bl __arm_sme_state @@ -2061,22 +2015,32 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: mov w0, #22647 // =0x5877 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: add sp, sp, #64 -; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: add sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #16 +; CHECK64-NEXT: .cfi_restore z8 +; CHECK64-NEXT: .cfi_restore z9 +; CHECK64-NEXT: .cfi_restore z10 +; CHECK64-NEXT: .cfi_restore z11 +; CHECK64-NEXT: .cfi_restore z12 +; CHECK64-NEXT: .cfi_restore z13 +; CHECK64-NEXT: .cfi_restore z14 +; CHECK64-NEXT: .cfi_restore z15 ; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -2089,20 +2053,11 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: ldr p6, 
[sp, #13, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #18 -; CHECK64-NEXT: .cfi_restore z8 -; CHECK64-NEXT: .cfi_restore z9 -; CHECK64-NEXT: .cfi_restore z10 -; CHECK64-NEXT: .cfi_restore z11 -; CHECK64-NEXT: .cfi_restore z12 -; CHECK64-NEXT: .cfi_restore z13 -; CHECK64-NEXT: .cfi_restore z14 -; CHECK64-NEXT: .cfi_restore z15 -; CHECK64-NEXT: .cfi_def_cfa wsp, 128 -; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #128 +; CHECK64-NEXT: addvl sp, sp, #2 +; CHECK64-NEXT: .cfi_def_cfa wsp, 64 +; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload ; CHECK64-NEXT: .cfi_def_cfa_offset 0 ; CHECK64-NEXT: .cfi_restore w19 ; CHECK64-NEXT: .cfi_restore w26 @@ -2113,139 +2068,269 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, ; CHECK64-NEXT: .cfi_restore w29 ; CHECK64-NEXT: ret ; -; CHECK1024-LABEL: svecc_call: -; CHECK1024: // %bb.0: // %entry -; CHECK1024-NEXT: sub sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 1088 -; CHECK1024-NEXT: cntd x9 -; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill -; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill -; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill -; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill -; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill -; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill -; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill -; CHECK1024-NEXT: add x29, sp, #1024 -; CHECK1024-NEXT: .cfi_def_cfa w29, 64 -; CHECK1024-NEXT: .cfi_offset w19, -16 -; CHECK1024-NEXT: .cfi_offset w26, -24 -; CHECK1024-NEXT: .cfi_offset w27, -32 -; CHECK1024-NEXT: .cfi_offset w28, -40 -; CHECK1024-NEXT: .cfi_offset vg, -48 -; CHECK1024-NEXT: .cfi_offset w30, -56 -; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z17, 
[sp, #8, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088 -; CHECK1024-NEXT: sub sp, sp, #1024 -; CHECK1024-NEXT: mov x8, x0 -; CHECK1024-NEXT: bl __arm_sme_state -; CHECK1024-NEXT: mov x19, x0 -; CHECK1024-NEXT: //APP -; CHECK1024-NEXT: //NO_APP -; CHECK1024-NEXT: tbz w19, #0, .LBB28_2 -; CHECK1024-NEXT: // %bb.1: // %entry -; CHECK1024-NEXT: smstop sm -; CHECK1024-NEXT: .LBB28_2: // %entry -; CHECK1024-NEXT: mov x0, x8 -; CHECK1024-NEXT: mov w1, #45 // =0x2d -; CHECK1024-NEXT: mov w2, #37 // =0x25 -; CHECK1024-NEXT: bl memset -; CHECK1024-NEXT: tbz w19, #0, .LBB28_4 -; CHECK1024-NEXT: // %bb.3: // %entry -; CHECK1024-NEXT: smstart sm -; CHECK1024-NEXT: .LBB28_4: // %entry -; CHECK1024-NEXT: mov w0, #22647 // =0x5877 -; CHECK1024-NEXT: movk w0, #59491, lsl #16 -; CHECK1024-NEXT: add sp, sp, #1024 -; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] 
// 16-byte Folded Reload -; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: addvl sp, sp, #18 -; CHECK1024-NEXT: .cfi_restore z8 -; CHECK1024-NEXT: .cfi_restore z9 -; CHECK1024-NEXT: .cfi_restore z10 -; CHECK1024-NEXT: .cfi_restore z11 -; CHECK1024-NEXT: .cfi_restore z12 -; CHECK1024-NEXT: .cfi_restore z13 -; CHECK1024-NEXT: .cfi_restore z14 -; CHECK1024-NEXT: .cfi_restore z15 -; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088 -; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload -; CHECK1024-NEXT: add sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 0 -; CHECK1024-NEXT: .cfi_restore w19 -; CHECK1024-NEXT: .cfi_restore w26 -; CHECK1024-NEXT: .cfi_restore w27 -; CHECK1024-NEXT: .cfi_restore w28 -; CHECK1024-NEXT: .cfi_restore vg -; CHECK1024-NEXT: .cfi_restore w30 -; CHECK1024-NEXT: .cfi_restore w29 -; CHECK1024-NEXT: ret +; CHECK1024-NOSPLITSVE-LABEL: svecc_call: +; CHECK1024-NOSPLITSVE: // %bb.0: // %entry +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088 +; CHECK1024-NOSPLITSVE-NEXT: cntd x9 +; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18 +; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte 
Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * 
IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: mov x8, x0 +; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0 +; CHECK1024-NOSPLITSVE-NEXT: //APP +; CHECK1024-NOSPLITSVE-NEXT: //NO_APP +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_2 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry +; CHECK1024-NOSPLITSVE-NEXT: smstop sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB28_2: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov x0, x8 +; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-NOSPLITSVE-NEXT: bl memset +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_4 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-NOSPLITSVE-NEXT: smstart sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB28_4: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12 +; 
CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088 +; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-NOSPLITSVE-NEXT: ret +; +; CHECK1024-SPLITSVE-LABEL: svecc_call: +; CHECK1024-SPLITSVE: // %bb.0: // %entry +; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64 +; CHECK1024-SPLITSVE-NEXT: cntd x9 +; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: mov x29, sp +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2 +; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16 +; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: 
str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: mov x8, x0 +; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-SPLITSVE-NEXT: mov x19, x0 +; CHECK1024-SPLITSVE-NEXT: //APP +; CHECK1024-SPLITSVE-NEXT: //NO_APP +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_2 +; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry +; CHECK1024-SPLITSVE-NEXT: smstop sm +; CHECK1024-SPLITSVE-NEXT: .LBB28_2: // %entry +; CHECK1024-SPLITSVE-NEXT: mov x0, x8 +; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-SPLITSVE-NEXT: bl memset +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_4 +; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-SPLITSVE-NEXT: smstart sm +; CHECK1024-SPLITSVE-NEXT: .LBB28_4: // %entry +; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded 
Reload +; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2 +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64 +; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-SPLITSVE-NEXT: ret entry: tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37) @@ -2380,23 +2465,22 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; ; CHECK64-LABEL: svecc_alloca_call: ; CHECK64: // %bb.0: // %entry -; CHECK64-NEXT: sub sp, sp, #128 -; CHECK64-NEXT: .cfi_def_cfa_offset 128 +; CHECK64-NEXT: stp x29, x30, [sp, #-64]! 
// 16-byte Folded Spill +; CHECK64-NEXT: .cfi_def_cfa_offset 64 ; CHECK64-NEXT: cntd x9 -; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill -; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill -; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill -; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill -; CHECK64-NEXT: add x29, sp, #64 +; CHECK64-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK64-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK64-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK64-NEXT: mov x29, sp ; CHECK64-NEXT: .cfi_def_cfa w29, 64 -; CHECK64-NEXT: .cfi_offset w19, -16 -; CHECK64-NEXT: .cfi_offset w26, -24 -; CHECK64-NEXT: .cfi_offset w27, -32 -; CHECK64-NEXT: .cfi_offset w28, -40 +; CHECK64-NEXT: .cfi_offset w19, -8 +; CHECK64-NEXT: .cfi_offset w26, -16 +; CHECK64-NEXT: .cfi_offset w27, -24 +; CHECK64-NEXT: .cfi_offset w28, -32 ; CHECK64-NEXT: .cfi_offset vg, -48 ; CHECK64-NEXT: .cfi_offset w30, -56 ; CHECK64-NEXT: .cfi_offset w29, -64 -; CHECK64-NEXT: addvl sp, sp, #-18 +; CHECK64-NEXT: addvl sp, sp, #-2 ; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill @@ -2409,30 +2493,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill ; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 
0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * IncomingVG - 128 -; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: sub sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #-16 +; CHECK64-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK64-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 24 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 32 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 40 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 48 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 56 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 64 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 72 * IncomingVG - 128 +; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 80 * IncomingVG - 128 ; CHECK64-NEXT: sub sp, sp, #112 ; CHECK64-NEXT: bl __arm_sme_state ; CHECK64-NEXT: mov x19, x0 @@ -2453,22 +2539,32 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: mov w0, #22647 // =0x5877 ; CHECK64-NEXT: movk w0, #59491, lsl #16 ; CHECK64-NEXT: add sp, sp, #112 -; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z18, [sp, 
#7, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK64-NEXT: add sp, sp, #64 +; CHECK64-NEXT: addvl sp, sp, #16 +; CHECK64-NEXT: .cfi_restore z8 +; CHECK64-NEXT: .cfi_restore z9 +; CHECK64-NEXT: .cfi_restore z10 +; CHECK64-NEXT: .cfi_restore z11 +; CHECK64-NEXT: .cfi_restore z12 +; CHECK64-NEXT: .cfi_restore z13 +; CHECK64-NEXT: .cfi_restore z14 +; CHECK64-NEXT: .cfi_restore z15 ; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -2481,20 +2577,11 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload ; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload -; CHECK64-NEXT: addvl sp, sp, #18 -; CHECK64-NEXT: .cfi_restore z8 -; CHECK64-NEXT: .cfi_restore z9 -; CHECK64-NEXT: .cfi_restore z10 -; CHECK64-NEXT: .cfi_restore z11 -; CHECK64-NEXT: .cfi_restore z12 -; CHECK64-NEXT: .cfi_restore z13 -; CHECK64-NEXT: .cfi_restore z14 -; CHECK64-NEXT: .cfi_restore z15 -; CHECK64-NEXT: .cfi_def_cfa wsp, 128 -; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload -; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload -; CHECK64-NEXT: add sp, sp, #128 +; CHECK64-NEXT: addvl sp, sp, #2 +; CHECK64-NEXT: .cfi_def_cfa wsp, 64 +; CHECK64-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK64-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload ; CHECK64-NEXT: .cfi_def_cfa_offset 0 ; CHECK64-NEXT: 
.cfi_restore w19 ; CHECK64-NEXT: .cfi_restore w26 @@ -2505,138 +2592,267 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8 ; CHECK64-NEXT: .cfi_restore w29 ; CHECK64-NEXT: ret ; -; CHECK1024-LABEL: svecc_alloca_call: -; CHECK1024: // %bb.0: // %entry -; CHECK1024-NEXT: sub sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 1088 -; CHECK1024-NEXT: cntd x9 -; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill -; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill -; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill -; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill -; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill -; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill -; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill -; CHECK1024-NEXT: add x29, sp, #1024 -; CHECK1024-NEXT: .cfi_def_cfa w29, 64 -; CHECK1024-NEXT: .cfi_offset w19, -16 -; CHECK1024-NEXT: .cfi_offset w26, -24 -; CHECK1024-NEXT: .cfi_offset w27, -32 -; CHECK1024-NEXT: .cfi_offset w28, -40 -; CHECK1024-NEXT: .cfi_offset vg, -48 -; CHECK1024-NEXT: .cfi_offset w30, -56 -; CHECK1024-NEXT: .cfi_offset w29, -64 -; CHECK1024-NEXT: addvl sp, sp, #-18 -; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill -; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill -; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 
0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 -; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088 -; CHECK1024-NEXT: sub sp, sp, #1072 -; CHECK1024-NEXT: bl __arm_sme_state -; CHECK1024-NEXT: mov x19, x0 -; CHECK1024-NEXT: //APP -; CHECK1024-NEXT: //NO_APP -; CHECK1024-NEXT: tbz w19, #0, .LBB29_2 -; CHECK1024-NEXT: // %bb.1: // %entry -; CHECK1024-NEXT: smstop sm -; CHECK1024-NEXT: .LBB29_2: // %entry -; CHECK1024-NEXT: mov x0, sp -; CHECK1024-NEXT: mov w1, #45 // =0x2d -; CHECK1024-NEXT: mov w2, #37 // =0x25 -; CHECK1024-NEXT: bl memset -; CHECK1024-NEXT: tbz w19, #0, .LBB29_4 -; CHECK1024-NEXT: // %bb.3: // %entry -; CHECK1024-NEXT: smstart sm -; CHECK1024-NEXT: .LBB29_4: // %entry -; CHECK1024-NEXT: mov w0, #22647 // =0x5877 -; CHECK1024-NEXT: movk w0, #59491, lsl #16 -; CHECK1024-NEXT: add sp, sp, #1072 -; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload -; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: ldr p4, [sp, 
#15, mul vl] // 2-byte Folded Reload -; CHECK1024-NEXT: addvl sp, sp, #18 -; CHECK1024-NEXT: .cfi_restore z8 -; CHECK1024-NEXT: .cfi_restore z9 -; CHECK1024-NEXT: .cfi_restore z10 -; CHECK1024-NEXT: .cfi_restore z11 -; CHECK1024-NEXT: .cfi_restore z12 -; CHECK1024-NEXT: .cfi_restore z13 -; CHECK1024-NEXT: .cfi_restore z14 -; CHECK1024-NEXT: .cfi_restore z15 -; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088 -; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload -; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload -; CHECK1024-NEXT: add sp, sp, #1088 -; CHECK1024-NEXT: .cfi_def_cfa_offset 0 -; CHECK1024-NEXT: .cfi_restore w19 -; CHECK1024-NEXT: .cfi_restore w26 -; CHECK1024-NEXT: .cfi_restore w27 -; CHECK1024-NEXT: .cfi_restore w28 -; CHECK1024-NEXT: .cfi_restore vg -; CHECK1024-NEXT: .cfi_restore w30 -; CHECK1024-NEXT: .cfi_restore w29 -; CHECK1024-NEXT: ret +; CHECK1024-NOSPLITSVE-LABEL: svecc_alloca_call: +; CHECK1024-NOSPLITSVE: // %bb.0: // %entry +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088 +; CHECK1024-NOSPLITSVE-NEXT: cntd x9 +; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18 +; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, 
mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088 +; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1072 +; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0 +; CHECK1024-NOSPLITSVE-NEXT: //APP +; CHECK1024-NOSPLITSVE-NEXT: //NO_APP +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_2 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry +; CHECK1024-NOSPLITSVE-NEXT: smstop sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB29_2: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov x0, sp +; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-NOSPLITSVE-NEXT: bl memset +; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_4 +; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-NOSPLITSVE-NEXT: smstart sm +; CHECK1024-NOSPLITSVE-NEXT: .LBB29_4: // %entry +; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1072 +; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, 
mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088 +; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload +; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-NOSPLITSVE-NEXT: ret +; +; CHECK1024-SPLITSVE-LABEL: 
svecc_alloca_call: +; CHECK1024-SPLITSVE: // %bb.0: // %entry +; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64 +; CHECK1024-SPLITSVE-NEXT: cntd x9 +; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: mov x29, sp +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56 +; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2 +; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16 +; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: 
.cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088 +; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1072 +; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state +; CHECK1024-SPLITSVE-NEXT: mov x19, x0 +; CHECK1024-SPLITSVE-NEXT: //APP +; CHECK1024-SPLITSVE-NEXT: //NO_APP +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_2 +; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry +; CHECK1024-SPLITSVE-NEXT: smstop sm +; CHECK1024-SPLITSVE-NEXT: .LBB29_2: // %entry +; CHECK1024-SPLITSVE-NEXT: mov x0, sp +; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d +; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25 +; CHECK1024-SPLITSVE-NEXT: bl memset +; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_4 +; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry +; CHECK1024-SPLITSVE-NEXT: smstart sm +; CHECK1024-SPLITSVE-NEXT: .LBB29_4: // %entry +; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877 +; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16 +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1072 +; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024 +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13 +; 
CHECK1024-SPLITSVE-NEXT: .cfi_restore z14 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15 +; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2 +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64 +; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload +; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30 +; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29 +; CHECK1024-SPLITSVE-NEXT: ret entry: tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2 diff --git a/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll new file mode 100644 index 0000000..a1065bc --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sve-int-mul-neg.ll @@ -0,0 +1,131 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -verify-machineinstrs -mattr=+sve < %s | FileCheck %s + +target triple = "aarch64-unknown-linux-gnu" + +; Muls with (-1) as an operand should fold to neg. 
+define <vscale x 16 x i8> @mul_neg_fold_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.b, p0/m, z0.b +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1)) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 8 x i16> @mul_neg_fold_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.h, p0/m, z0.h +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1)) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.s, p0/m, z0.s +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1)) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> @mul_neg_fold_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.d, p0/m, z0.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1)) + ret <vscale x 2 x i64> %1 +} + +define <vscale x 16 x i8> @mul_neg_fold_u_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_u_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.b, p0/m, z0.b +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 -1)) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 8 x i16> @mul_neg_fold_u_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_u_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.h, p0/m, z0.h +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -1)) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_u_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_u_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.s, p0/m, z0.s +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 -1)) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> @mul_neg_fold_u_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_u_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: neg z0.d, p0/m, z0.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 -1)) + ret <vscale x 2 x i64> %1 +} + +define <vscale x 16 x i8> @mul_neg_fold_different_argument_order_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.b, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.b, p0/m, z0.b +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 -1), <vscale x 16 x i8> %a) + ret <vscale x 16 x i8> %1 +} + +define <vscale x 
8 x i16> @mul_neg_fold_different_argument_order_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.h, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.h, p0/m, z0.h +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -1), <vscale x 8 x i16> %a) + ret <vscale x 8 x i16> %1 +} + +define <vscale x 4 x i32> @mul_neg_fold_different_argument_order_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i32: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.s, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.s, p0/m, z0.s +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 -1), <vscale x 4 x i32> %a) + ret <vscale x 4 x i32> %1 +} + +define <vscale x 2 x i64> @mul_neg_fold_different_argument_order_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) { +; CHECK-LABEL: mul_neg_fold_different_argument_order_i64: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z1.d, #-1 // =0xffffffffffffffff +; CHECK-NEXT: neg z1.d, p0/m, z0.d +; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: ret + %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 -1), <vscale x 2 x i64> %a) + ret <vscale x 2 x i64> %1 +} + +declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) + +declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>) +declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) +declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>) +declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll index 2cbb29e..d8de12c 100644 --- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll +++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll @@ -672,5 +672,3 @@ entry: ret i32 %x } declare void @other() -;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: -; CHECK-FRAMELAYOUT: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll index 41fda6d..efa51ea 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-constant.96.ll @@ -90,26 +90,24 @@ define <3 x i32> @v_load_constant_v3i32_align1(ptr addrspace(4) %ptr) { ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v10, v[0:1], off offset:8 ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v11, v[0:1], off offset:9 ; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v12, v[0:1], off offset:11 -; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v0, v[0:1], off offset:10 +; GFX1250-NOUNALIGNED-NEXT: global_load_u8 v13, v[0:1], off offset:10 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0xa ; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v1, v3, 8, v2 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v0, v3, 8, v2 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x8 -; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v3, 16, v4 :: v_dual_lshlrev_b32 v2, 24, v5 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v2, 16, v4 :: v_dual_lshlrev_b32 v1, 24, v5 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x6 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v4, v7, 8, v6 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v3, v7, 8, v6 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x4 -; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v6, 16, v8 :: v_dual_lshlrev_b32 v5, 24, v9 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v5, 16, v8 :: v_dual_lshlrev_b32 v4, 24, v9 +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v1, v2, v0 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x2 -; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v7, v11, 8, v10 -; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x1 -; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v8, 24, v12 +; GFX1250-NOUNALIGNED-NEXT: v_lshl_or_b32 v6, v11, 8, v10 ; GFX1250-NOUNALIGNED-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: v_lshlrev_b32_e32 v9, 16, v0 -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v0, v2, v3, v1 -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v5, v6, v4 -; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v8, v9, v7 +; GFX1250-NOUNALIGNED-NEXT: v_dual_lshlrev_b32 v7, 24, v12 :: v_dual_lshlrev_b32 v8, 16, v13 +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v1, v4, v5, v3 +; GFX1250-NOUNALIGNED-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-NOUNALIGNED-NEXT: v_or3_b32 v2, v7, v8, v6 ; GFX1250-NOUNALIGNED-NEXT: s_set_pc_i64 s[30:31] ; ; GFX9-UNALIGNED-LABEL: v_load_constant_v3i32_align1: @@ -942,7 +940,7 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg ; ; GFX1250-NOUNALIGNED-LABEL: s_load_constant_v3i32_align1: ; GFX1250-NOUNALIGNED: ; %bb.0: -; GFX1250-NOUNALIGNED-NEXT: s_clause 0xa +; GFX1250-NOUNALIGNED-NEXT: s_clause 0xb ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s2, s[0:1], 0x1 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s3, s[0:1], 0x3 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s4, s[0:1], 0x2 @@ -954,27 +952,26 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align1(ptr addrspace(4) inreg ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s10, s[0:1], 0x0 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s11, s[0:1], 0x4 ; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s12, s[0:1], 0xa -; GFX1250-NOUNALIGNED-NEXT: s_wait_xcnt 0x0 -; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s1, s[0:1], 0x8 +; GFX1250-NOUNALIGNED-NEXT: s_load_u8 s13, s[0:1], 0x8 ; GFX1250-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 ; 
GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s0, s2, 8 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s3, 24 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s4, 16 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s5, 8 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s3 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, s6, 24 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s7, 16 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s8, 8 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s3, 24 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s4, 16 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s3, s5, 8 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s1, s2 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s4, s6, 24 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s5, s7, 16 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s6, s8, 8 ; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s0, s10 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s8, s9, 24 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s2, s0 -; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s2, s12, 16 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s11 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s5, s6 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s7, s9, 24 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s0, s1, s0 +; GFX1250-NOUNALIGNED-NEXT: s_lshl_b32 s1, s12, 16 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s3, s11 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s3, s4, s5 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s4, s6, s13 ; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s5, s7, s1 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s8, s2 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s4, s3 -; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s2, s5 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s1, s3, s2 +; GFX1250-NOUNALIGNED-NEXT: s_or_b32 s2, s5, s4 ; GFX1250-NOUNALIGNED-NEXT: ; return to shader part epilog ; ; GFX9-UNALIGNED-LABEL: s_load_constant_v3i32_align1: @@ -1351,11 +1348,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align2(ptr addrspace(4) inreg } define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v3i32_align4: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align4: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align4: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v3i32_align4: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v3i32_align4: ; GFX9: ; %bb.0: @@ -1388,11 +1399,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align4(ptr addrspace(4) inreg } define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_i96_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_i96_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; 
return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_i96_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_i96_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_i96_align8: ; GFX9: ; %bb.0: @@ -1425,11 +1450,25 @@ define amdgpu_ps i96 @s_load_constant_i96_align8(ptr addrspace(4) inreg %ptr) { } define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v3i32_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v3i32_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v3i32_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v3i32_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v3i32_align8: ; GFX9: ; %bb.0: @@ -1462,11 +1501,25 @@ define amdgpu_ps <3 x i32> @s_load_constant_v3i32_align8(ptr addrspace(4) inreg } define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v6i16_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v6i16_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v6i16_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v6i16_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v6i16_align8: ; GFX9: ; %bb.0: @@ -1500,24 +1553,64 @@ define amdgpu_ps <3 x i32> @s_load_constant_v6i16_align8(ptr addrspace(4) inreg } define amdgpu_ps <12 x i8> @s_load_constant_v12i8_align8(ptr addrspace(4) inreg %ptr) { -; GFX12-LABEL: s_load_constant_v12i8_align8: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_lshr_b32 s13, s0, 8 -; GFX12-NEXT: s_lshr_b32 s12, s0, 16 -; GFX12-NEXT: s_lshr_b32 s3, s0, 24 -; GFX12-NEXT: s_lshr_b32 s5, s1, 8 -; GFX12-NEXT: s_lshr_b32 s6, s1, 16 -; 
GFX12-NEXT: s_lshr_b32 s7, s1, 24 -; GFX12-NEXT: s_lshr_b32 s9, s2, 8 -; GFX12-NEXT: s_lshr_b32 s10, s2, 16 -; GFX12-NEXT: s_lshr_b32 s11, s2, 24 -; GFX12-NEXT: s_mov_b32 s4, s1 -; GFX12-NEXT: s_mov_b32 s8, s2 -; GFX12-NEXT: s_mov_b32 s1, s13 -; GFX12-NEXT: s_mov_b32 s2, s12 -; GFX12-NEXT: ; return to shader part epilog +; GFX12-UNALIGNED-LABEL: s_load_constant_v12i8_align8: +; GFX12-UNALIGNED: ; %bb.0: +; GFX12-UNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-UNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s13, s0, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s12, s0, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s3, s0, 24 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s5, s1, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s6, s1, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s7, s1, 24 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s9, s2, 8 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s10, s2, 16 +; GFX12-UNALIGNED-NEXT: s_lshr_b32 s11, s2, 24 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s4, s1 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s8, s2 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s1, s13 +; GFX12-UNALIGNED-NEXT: s_mov_b32 s2, s12 +; GFX12-UNALIGNED-NEXT: ; return to shader part epilog +; +; GFX12-NOUNALIGNED-LABEL: s_load_constant_v12i8_align8: +; GFX12-NOUNALIGNED: ; %bb.0: +; GFX12-NOUNALIGNED-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NOUNALIGNED-NEXT: s_wait_kmcnt 0x0 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s13, s0, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s12, s0, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s3, s0, 24 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s5, s1, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s6, s1, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s7, s1, 24 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s9, s2, 8 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s10, s2, 16 +; GFX12-NOUNALIGNED-NEXT: s_lshr_b32 s11, s2, 24 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s4, s1 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s8, s2 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s1, s13 +; GFX12-NOUNALIGNED-NEXT: s_mov_b32 s2, s12 +; GFX12-NOUNALIGNED-NEXT: ; return to shader part epilog +; +; GFX1250-LABEL: s_load_constant_v12i8_align8: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_lshr_b32 s13, s0, 8 +; GFX1250-NEXT: s_lshr_b32 s12, s0, 16 +; GFX1250-NEXT: s_lshr_b32 s3, s0, 24 +; GFX1250-NEXT: s_lshr_b32 s5, s1, 8 +; GFX1250-NEXT: s_lshr_b32 s6, s1, 16 +; GFX1250-NEXT: s_lshr_b32 s7, s1, 24 +; GFX1250-NEXT: s_lshr_b32 s9, s2, 8 +; GFX1250-NEXT: s_lshr_b32 s10, s2, 16 +; GFX1250-NEXT: s_lshr_b32 s11, s2, 24 +; GFX1250-NEXT: s_mov_b32 s4, s1 +; GFX1250-NEXT: s_mov_b32 s8, s2 +; GFX1250-NEXT: s_mov_b32 s1, s13 +; GFX1250-NEXT: s_mov_b32 s2, s12 +; GFX1250-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_load_constant_v12i8_align8: ; GFX9: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll index aac499f..b486fabb 100644 --- a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll +++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll @@ -9,15 +9,14 @@ target triple = "amdgcn-amd-amdhsa" define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) { ; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: 
s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0 -; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_lshlrev_b32 v1, 20, v0 +; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s0, -1 ; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0 -; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1] ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1 ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo @@ -56,13 +55,11 @@ define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspa ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] +; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], src_flat_scratch_base_lo, v[0:1] ; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 ; GFX1250-SDAG-NEXT: s_endpgm @@ -91,10 +88,9 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) { ; GFX1250-LABEL: use_flat_to_private_addrspacecast: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS @@ -110,9 +106,8 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0 ; GFX1250-SDAG-NEXT: s_endpgm @@ -122,9 +117,7 @@ define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) { ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0 ; GFX1250-GISEL-NEXT: s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll 
b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll index ef52694..54871a6 100644 --- a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll +++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll @@ -538,58 +538,61 @@ define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base ; GFX1250-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5 ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB34_6 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_cbranch_execnz .LBB34_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB34_8 +; GFX1250-NEXT: .LBB34_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB34_3: ; %atomicrmw.check.private +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB34_3 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global -; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execz .LBB34_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB34_3: ; %Flow +; GFX1250-NEXT: .LBB34_5: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB34_5 -; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB34_7 +; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB34_5: ; 
%Flow1 +; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off +; GFX1250-NEXT: .LBB34_7: ; %Flow1 ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB34_6: ; %Flow2 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB34_8 -; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_cbranch_execz .LBB34_2 +; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] -; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_wait_dscnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr %ptr, double %val monotonic ret double %result @@ -600,58 +603,61 @@ define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 ; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base ; GFX1250-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 -; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v5 ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB35_6 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: s_cbranch_execnz .LBB35_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow2 +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB35_8 +; GFX1250-NEXT: .LBB35_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB35_3: ; %atomicrmw.check.private +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB35_3 -; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global -; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execz .LBB35_5 +; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.global +; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[4:5], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: 
$vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB35_3: ; %Flow +; GFX1250-NEXT: .LBB35_5: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1 -; GFX1250-NEXT: s_cbranch_execz .LBB35_5 -; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB35_7 +; GFX1250-NEXT: ; %bb.6: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB35_5: ; %Flow1 +; GFX1250-NEXT: v_add_f64_e32 v[2:3], v[0:1], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v4, v[2:3], off +; GFX1250-NEXT: .LBB35_7: ; %Flow1 ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1 -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB35_6: ; %Flow2 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB35_8 -; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo +; GFX1250-NEXT: s_cbranch_execz .LBB35_2 +; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.shared +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3] -; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi +; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo +; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3] ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 +; GFX1250-NEXT: s_wait_dscnt 0x0 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic ret double %result @@ -686,40 +692,42 @@ define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB38_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] 
th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB38_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB38_4 +; GFX1250-NEXT: .LBB38_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB38_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB38_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB38_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB38_2 +; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmin ptr %ptr, double %val monotonic ret double %result @@ -730,40 +738,42 @@ define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB39_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: 
s_cbranch_execnz .LBB39_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB39_4 +; GFX1250-NEXT: .LBB39_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB39_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_min_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB39_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB39_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB39_2 +; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_min_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic ret double %result @@ -798,40 +808,42 @@ define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB42_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB42_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: 
s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB42_4 +; GFX1250-NEXT: .LBB42_2: ; %atomicrmw.phi +; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: .LBB42_3: ; %atomicrmw.global +; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 ; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-NEXT: .LBB42_2: ; %Flow ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB42_4 -; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo -; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] +; GFX1250-NEXT: s_cbranch_execz .LBB42_2 +; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.private +; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0 +; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo -; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off +; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo +; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5] -; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off -; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi +; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1] +; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3] +; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] %result = atomicrmw fmax ptr %ptr, double %val monotonic ret double %result @@ -842,40 +854,42 @@ define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) { ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4 -; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5 +; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 +; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 +; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0 -; GFX1250-NEXT: s_cbranch_execz .LBB43_2 -; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global -; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1 +; GFX1250-NEXT: s_cbranch_execnz .LBB43_3 +; GFX1250-NEXT: ; %bb.1: ; %Flow +; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB43_4 +; GFX1250-NEXT: .LBB43_2: 
; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: .LBB43_3: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[0:1], v[4:5], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GFX1250-NEXT: .LBB43_2: ; %Flow
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
-; GFX1250-NEXT: s_cbranch_execz .LBB43_4
-; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
-; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
-; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v0, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
-; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
-; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off
-; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi
+; GFX1250-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
+; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[2:3], off
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic
ret double %result
@@ -982,13 +996,11 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1000,10 +1012,9 @@ define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB52_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1025,13 +1036,11 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1043,10 +1052,9 @@ define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB53_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1068,13 +1076,11 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB54_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1086,10 +1092,9 @@ define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB54_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1111,13 +1116,11 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB55_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1129,10 +1132,9 @@ define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB55_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1154,13 +1156,11 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB56_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1172,10 +1172,9 @@ define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB56_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1197,13 +1196,11 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB57_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1215,10 +1212,9 @@ define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB57_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1240,13 +1236,11 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1258,10 +1252,9 @@ define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB58_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
@@ -1283,13 +1276,11 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
@@ -1301,10 +1292,9 @@ define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-NEXT: s_cbranch_execz .LBB59_4
; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
-; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 94ba5cd..6b5647e 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -569,10 +569,10 @@ define <16 x bfloat> @v_load_global_v16bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_load_b128 v[0:3], v[4:5], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[4:5], off offset:16
+; GFX1250-NEXT: global_load_b128 v[0:3], v[8:9], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[8:9], off offset:16
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <16 x bfloat>, ptr addrspace(1) %ptr
@@ -752,12 +752,12 @@ define <32 x bfloat> @v_load_global_v32bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v13, v1 :: v_dual_mov_b32 v12, v0
+; GFX1250-NEXT: v_dual_mov_b32 v17, v1 :: v_dual_mov_b32 v16, v0
; GFX1250-NEXT: s_clause 0x3
-; GFX1250-NEXT: global_load_b128 v[0:3], v[12:13], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[12:13], off offset:16
-; GFX1250-NEXT: global_load_b128 v[8:11], v[12:13], off offset:32
-; GFX1250-NEXT: global_load_b128 v[12:15], v[12:13], off offset:48
+; GFX1250-NEXT: global_load_b128 v[0:3], v[16:17], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[16:17], off offset:16
+; GFX1250-NEXT: global_load_b128 v[8:11], v[16:17], off offset:32
+; GFX1250-NEXT: global_load_b128 v[12:15], v[16:17], off offset:48
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <32 x bfloat>, ptr addrspace(1) %ptr
@@ -1055,16 +1055,16 @@ define <64 x bfloat> @v_load_global_v64bf16(ptr addrspace(1) %ptr) {
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v29, v1 :: v_dual_mov_b32 v28, v0
+; GFX1250-NEXT: v_dual_mov_b32 v33, v1 :: v_dual_mov_b32 v32, v0
; GFX1250-NEXT: s_clause 0x7
-; GFX1250-NEXT: global_load_b128 v[0:3], v[28:29], off
-; GFX1250-NEXT: global_load_b128 v[4:7], v[28:29], off offset:16
-; GFX1250-NEXT: global_load_b128 v[8:11], v[28:29], off offset:32
-; GFX1250-NEXT: global_load_b128 v[12:15], v[28:29], off offset:48
-; GFX1250-NEXT: global_load_b128 v[16:19], v[28:29], off offset:64
-; GFX1250-NEXT: global_load_b128 v[20:23], v[28:29], off offset:80
-; GFX1250-NEXT: global_load_b128 v[24:27], v[28:29], off offset:96
-; GFX1250-NEXT: global_load_b128 v[28:31], v[28:29], off offset:112
+; GFX1250-NEXT: global_load_b128 v[0:3], v[32:33], off
+; GFX1250-NEXT: global_load_b128 v[4:7], v[32:33], off offset:16
+; GFX1250-NEXT: global_load_b128 v[8:11], v[32:33], off offset:32
+; GFX1250-NEXT: global_load_b128 v[12:15], v[32:33], off offset:48
+; GFX1250-NEXT: global_load_b128 v[16:19], v[32:33], off offset:64
+; GFX1250-NEXT: global_load_b128 v[20:23], v[32:33], off offset:80
+; GFX1250-NEXT: global_load_b128 v[24:27], v[32:33], off offset:96
+; GFX1250-NEXT: global_load_b128 v[28:31], v[32:33], off offset:112
; GFX1250-NEXT: s_wait_loadcnt 0x0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%load = load <64 x bfloat>, ptr addrspace(1) %ptr
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
index 243f0ed..f8655a7 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
@@ -256,7 +256,6 @@ define amdgpu_kernel void @uniform_unconditional_min_long_forward_branch(ptr add
; GCN-NEXT: s_wait_storecnt 0x0
; GCN-NEXT: .LBB5_3: ; %bb4
; GCN-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GCN-NEXT: s_wait_xcnt 0x0
; GCN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 63
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: global_store_b32 v0, v1, s[0:1] scope:SCOPE_SYS
diff --git a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
index ddd3b152..363a248 100644
--- a/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
+++ b/llvm/test/CodeGen/AMDGPU/calling-conventions.ll
@@ -2700,142 +2700,142 @@ define amdgpu_kernel void @amd_kernel_v32i8(<32 x i8> %arg0) {
;
; GFX1250-LABEL: amd_kernel_v32i8:
; GFX1250: ; %bb.0: ; %entry
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b64_e32 v[8:9], 16
; GFX1250-NEXT: v_mov_b64_e32 v[10:11], 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_lshr_b32 s16, s0, 16
-; GFX1250-NEXT: s_lshr_b32 s17, s0, 24
-; GFX1250-NEXT: s_lshr_b32 s20, s2, 16
-; GFX1250-NEXT: s_lshr_b32 s21, s2, 24
-; GFX1250-NEXT: s_lshr_b32 s14, s7, 16
-; GFX1250-NEXT: s_lshr_b32 s15, s7, 24
-; GFX1250-NEXT: s_bfe_u32 s27, s7, 0x80008
+; GFX1250-NEXT: s_lshr_b32 s16, s8, 16
+; GFX1250-NEXT: s_lshr_b32 s17, s8, 24
+; GFX1250-NEXT: s_lshr_b32 s6, s15, 16
+; GFX1250-NEXT: s_lshr_b32 s7, s15, 24
+; GFX1250-NEXT: s_bfe_u32 s27, s15, 0x80008
; GFX1250-NEXT: s_add_co_i32 s17, s17, s17
; GFX1250-NEXT: s_add_co_i32 s16, s16, s16
-; GFX1250-NEXT: s_lshr_b32 s18, s1, 16
-; GFX1250-NEXT: s_lshr_b32 s19, s1, 24
-; GFX1250-NEXT: s_lshr_b32 s22, s3, 16
-; GFX1250-NEXT: s_lshr_b32 s23, s3, 24
-; GFX1250-NEXT: s_bfe_u32 s29, s1, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s30, s3, 0x80008
-; GFX1250-NEXT: s_add_co_i32 s21, s21, s21
-; GFX1250-NEXT: s_add_co_i32 s20, s20, s20
; GFX1250-NEXT: s_lshl_b32 s17, s17, 8
; GFX1250-NEXT: s_and_b32 s16, s16, 0xff
-; GFX1250-NEXT: s_add_co_i32 s7, s7, s7
-; GFX1250-NEXT: s_add_co_i32 s27, s27, s27
; GFX1250-NEXT: s_add_co_i32 s15, s15, s15
-; GFX1250-NEXT: s_add_co_i32 s14, s14, s14
-; GFX1250-NEXT: s_add_co_i32 s3, s3, s3
+; GFX1250-NEXT: s_add_co_i32 s27, s27, s27
+; GFX1250-NEXT: s_add_co_i32 s7, s7, s7
+; GFX1250-NEXT: s_add_co_i32 s6, s6, s6
+; GFX1250-NEXT: s_or_b32 s16, s16, s17
+; GFX1250-NEXT: s_and_b32 s15, s15, 0xff
+; GFX1250-NEXT: s_lshl_b32 s17, s27, 8
+; GFX1250-NEXT: s_lshl_b32 s7, s7, 8
+; GFX1250-NEXT: s_and_b32 s6, s6, 0xff
+; GFX1250-NEXT: s_or_b32 s15, s15, s17
+; GFX1250-NEXT: s_or_b32 s6, s6, s7
+; GFX1250-NEXT: s_bfe_u32 s26, s14, 0x80008
+; GFX1250-NEXT: s_and_b32 s7, s15, 0xffff
+; GFX1250-NEXT: s_lshl_b32 s6, s6, 16
+; GFX1250-NEXT: s_lshr_b32 s20, s10, 16
+; GFX1250-NEXT: s_lshr_b32 s21, s10, 24
+; GFX1250-NEXT: s_lshr_b32 s4, s14, 16
+; GFX1250-NEXT: s_lshr_b32 s5, s14, 24
+; GFX1250-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-NEXT: s_add_co_i32 s7, s14, s14
+; GFX1250-NEXT: s_add_co_i32 s26, s26, s26
+; GFX1250-NEXT: s_lshr_b32 s18, s9, 16
+; GFX1250-NEXT: s_lshr_b32 s19, s9, 24
+; GFX1250-NEXT: s_lshr_b32 s22, s11, 16
+; GFX1250-NEXT: s_lshr_b32 s23, s11, 24
+; GFX1250-NEXT: s_bfe_u32 s29, s9, 0x80008
+; GFX1250-NEXT: s_bfe_u32 s30, s11, 0x80008
+; GFX1250-NEXT: s_add_co_i32 s21, s21, s21
+; GFX1250-NEXT: s_add_co_i32 s20, s20, s20
+; GFX1250-NEXT: s_lshr_b32 s2, s13, 16
+; GFX1250-NEXT: s_lshr_b32 s3, s13, 24
+; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1250-NEXT: s_lshl_b32 s14, s26, 8
+; GFX1250-NEXT: s_add_co_i32 s5, s5, s5
+; GFX1250-NEXT: s_add_co_i32 s4, s4, s4
+; GFX1250-NEXT: s_add_co_i32 s11, s11, s11
; GFX1250-NEXT: s_add_co_i32 s30, s30, s30
; GFX1250-NEXT: s_add_co_i32 s23, s23, s23
; GFX1250-NEXT: s_add_co_i32 s22, s22, s22
; GFX1250-NEXT: s_lshl_b32 s21, s21, 8
; GFX1250-NEXT: s_and_b32 s20, s20, 0xff
-; GFX1250-NEXT: s_add_co_i32 s1, s1, s1
+; GFX1250-NEXT: s_add_co_i32 s9, s9, s9
; GFX1250-NEXT: s_add_co_i32 s29, s29, s29
; GFX1250-NEXT: s_add_co_i32 s19, s19, s19
; GFX1250-NEXT: s_add_co_i32 s18, s18, s18
-; GFX1250-NEXT: s_lshr_b32 s10, s5, 16
-; GFX1250-NEXT: s_lshr_b32 s11, s5, 24
-; GFX1250-NEXT: s_lshr_b32 s12, s6, 16
-; GFX1250-NEXT: s_lshr_b32 s13, s6, 24
-; GFX1250-NEXT: s_or_b32 s16, s16, s17
-; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
-; GFX1250-NEXT: s_lshl_b32 s17, s27, 8
-; GFX1250-NEXT: s_lshl_b32 s15, s15, 8
-; GFX1250-NEXT: s_and_b32 s14, s14, 0xff
-; GFX1250-NEXT: s_and_b32 s3, s3, 0xff
+; GFX1250-NEXT: s_bfe_u32 s25, s13, 0x80008
+; GFX1250-NEXT: s_lshl_b32 s5, s5, 8
+; GFX1250-NEXT: s_and_b32 s4, s4, 0xff
+; GFX1250-NEXT: s_or_b32 s7, s7, s14
+; GFX1250-NEXT: s_add_co_i32 s3, s3, s3
+; GFX1250-NEXT: s_add_co_i32 s2, s2, s2
+; GFX1250-NEXT: s_and_b32 s11, s11, 0xff
; GFX1250-NEXT: s_lshl_b32 s30, s30, 8
; GFX1250-NEXT: s_lshl_b32 s23, s23, 8
; GFX1250-NEXT: s_and_b32 s22, s22, 0xff
; GFX1250-NEXT: s_or_b32 s20, s20, s21
-; GFX1250-NEXT: s_and_b32 s1, s1, 0xff
+; GFX1250-NEXT: s_and_b32 s9, s9, 0xff
; GFX1250-NEXT: s_lshl_b32 s21, s29, 8
; GFX1250-NEXT: s_lshl_b32 s19, s19, 8
; GFX1250-NEXT: s_and_b32 s18, s18, 0xff
-; GFX1250-NEXT: s_lshr_b32 s8, s4, 16
-; GFX1250-NEXT: s_lshr_b32 s9, s4, 24
-; GFX1250-NEXT: s_bfe_u32 s24, s4, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s25, s5, 0x80008
-; GFX1250-NEXT: s_bfe_u32 s26, s6, 0x80008
-; GFX1250-NEXT: s_or_b32 s7, s7, s17
-; GFX1250-NEXT: s_or_b32 s14, s14, s15
-; GFX1250-NEXT: s_add_co_i32 s13, s13, s13
-; GFX1250-NEXT: s_add_co_i32 s12, s12, s12
-; GFX1250-NEXT: s_add_co_i32 s11, s11, s11
-; GFX1250-NEXT: s_add_co_i32 s10, s10, s10
-; GFX1250-NEXT: s_bfe_u32 s28, s0, 0x80008
-; GFX1250-NEXT: s_or_b32 s3, s3, s30
+; GFX1250-NEXT: s_lshr_b32 s0, s12, 16
+; GFX1250-NEXT: s_lshr_b32 s1, s12, 24
+; GFX1250-NEXT: s_bfe_u32 s24, s12, 0x80008
+; GFX1250-NEXT: s_or_b32 s4, s4, s5
+; GFX1250-NEXT: s_and_b32 s5, s7, 0xffff
+; GFX1250-NEXT: s_add_co_i32 s7, s13, s13
+; GFX1250-NEXT: s_add_co_i32 s25, s25, s25
+; GFX1250-NEXT: s_lshl_b32 s3, s3, 8
+; GFX1250-NEXT: s_and_b32 s2, s2, 0xff
+; GFX1250-NEXT: s_bfe_u32 s28, s8, 0x80008
+; GFX1250-NEXT: s_or_b32 s11, s11, s30
; GFX1250-NEXT: s_or_b32 s22, s22, s23
-; GFX1250-NEXT: s_bfe_u32 s23, s2, 0x80008
-; GFX1250-NEXT: s_or_b32 s1, s1, s21
+; GFX1250-NEXT: s_bfe_u32 s23, s10, 0x80008
+; GFX1250-NEXT: s_or_b32 s9, s9, s21
; GFX1250-NEXT: s_or_b32 s18, s18, s19
-; GFX1250-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s14, s14, 16
-; GFX1250-NEXT: s_add_co_i32 s6, s6, s6
-; GFX1250-NEXT: s_add_co_i32 s26, s26, s26
-; GFX1250-NEXT: s_lshl_b32 s13, s13, 8
-; GFX1250-NEXT: s_and_b32 s12, s12, 0xff
-; GFX1250-NEXT: s_add_co_i32 s5, s5, s5
-; GFX1250-NEXT: s_add_co_i32 s25, s25, s25
-; GFX1250-NEXT: s_lshl_b32 s11, s11, 8
-; GFX1250-NEXT: s_and_b32 s10, s10, 0xff
-; GFX1250-NEXT: s_add_co_i32 s4, s4, s4
+; GFX1250-NEXT: s_lshl_b32 s4, s4, 16
+; GFX1250-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1250-NEXT: s_lshl_b32 s13, s25, 8
+; GFX1250-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-NEXT: s_add_co_i32 s3, s12, s12
; GFX1250-NEXT: s_add_co_i32 s24, s24, s24
-; GFX1250-NEXT: s_add_co_i32 s9, s9, s9
-; GFX1250-NEXT: s_add_co_i32 s8, s8, s8
-; GFX1250-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX1250-NEXT: s_add_co_i32 s1, s1, s1
+; GFX1250-NEXT: s_add_co_i32 s0, s0, s0
+; GFX1250-NEXT: s_and_b32 s11, s11, 0xffff
; GFX1250-NEXT: s_lshl_b32 s22, s22, 16
-; GFX1250-NEXT: s_add_co_i32 s2, s2, s2
+; GFX1250-NEXT: s_add_co_i32 s10, s10, s10
; GFX1250-NEXT: s_add_co_i32 s23, s23, s23
-; GFX1250-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX1250-NEXT: s_and_b32 s9, s9, 0xffff
; GFX1250-NEXT: s_lshl_b32 s18, s18, 16
-; GFX1250-NEXT: s_add_co_i32 s0, s0, s0
+; GFX1250-NEXT: s_add_co_i32 s8, s8, s8
; GFX1250-NEXT: s_add_co_i32 s28, s28, s28
-; GFX1250-NEXT: s_or_b32 s7, s7, s14
-; GFX1250-NEXT: s_and_b32 s6, s6, 0xff
-; GFX1250-NEXT: s_lshl_b32 s14, s26, 8
-; GFX1250-NEXT: s_or_b32 s12, s12, s13
-; GFX1250-NEXT: s_and_b32 s5, s5, 0xff
-; GFX1250-NEXT: s_lshl_b32 s13, s25, 8
-; GFX1250-NEXT: s_or_b32 s10, s10, s11
-; GFX1250-NEXT: s_and_b32 s4, s4, 0xff
-; GFX1250-NEXT: s_lshl_b32 s11, s24, 8
-; GFX1250-NEXT: s_lshl_b32 s9, s9, 8
-; GFX1250-NEXT: s_and_b32 s8, s8, 0xff
-; GFX1250-NEXT: s_or_b32 s3, s3, s22
-; GFX1250-NEXT: s_and_b32 s2, s2, 0xff
-; GFX1250-NEXT: s_lshl_b32 s22, s23, 8
-; GFX1250-NEXT: s_or_b32 s1, s1, s18
+; GFX1250-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-NEXT: s_or_b32 s5, s7, s13
+; GFX1250-NEXT: s_and_b32 s3, s3, 0xff
+; GFX1250-NEXT: s_lshl_b32 s7, s24, 8
+; GFX1250-NEXT: s_lshl_b32 s1, s1, 8
; GFX1250-NEXT: s_and_b32 s0, s0, 0xff
+; GFX1250-NEXT: s_or_b32 s11, s11, s22
+; GFX1250-NEXT: s_and_b32 s10, s10, 0xff
+; GFX1250-NEXT: s_lshl_b32 s22, s23, 8
+; GFX1250-NEXT: s_or_b32 s9, s9, s18
+; GFX1250-NEXT: s_and_b32 s8, s8, 0xff
; GFX1250-NEXT: s_lshl_b32 s18, s28, 8
-; GFX1250-NEXT: s_or_b32 s6, s6, s14
-; GFX1250-NEXT: s_or_b32 s5, s5, s13
-; GFX1250-NEXT: s_or_b32 s4, s4, s11
-; GFX1250-NEXT: s_or_b32 s8, s8, s9
-; GFX1250-NEXT: s_or_b32 s2, s2, s22
-; GFX1250-NEXT: s_or_b32 s0, s0, s18
-; GFX1250-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s12, s12, 16
+; GFX1250-NEXT: s_or_b32 s3, s3, s7
+; GFX1250-NEXT: s_or_b32 s0, s0, s1
+; GFX1250-NEXT: s_or_b32 s10, s10, s22
+; GFX1250-NEXT: s_or_b32 s8, s8, s18
; GFX1250-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX1250-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX1250-NEXT: s_lshl_b32 s8, s8, 16
-; GFX1250-NEXT: s_lshl_b32 s9, s10, 16
-; GFX1250-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX1250-NEXT: s_and_b32 s1, s3, 0xffff
+; GFX1250-NEXT: s_lshl_b32 s0, s0, 16
+; GFX1250-NEXT: s_lshl_b32 s2, s2, 16
+; GFX1250-NEXT: s_and_b32 s10, s10, 0xffff
; GFX1250-NEXT: s_lshl_b32 s20, s20, 16
-; GFX1250-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX1250-NEXT: s_and_b32 s8, s8, 0xffff
; GFX1250-NEXT: s_lshl_b32 s16, s16, 16
-; GFX1250-NEXT: s_or_b32 s6, s6, s12
-; GFX1250-NEXT: s_or_b32 s4, s4, s8
-; GFX1250-NEXT: s_or_b32 s5, s5, s9
-; GFX1250-NEXT: s_or_b32 s2, s2, s20
-; GFX1250-NEXT: s_or_b32 s0, s0, s16
-; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX1250-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
-; GFX1250-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, s1
-; GFX1250-NEXT: v_dual_mov_b32 v6, s2 :: v_dual_mov_b32 v7, s3
+; GFX1250-NEXT: s_or_b32 s0, s1, s0
+; GFX1250-NEXT: s_or_b32 s1, s5, s2
+; GFX1250-NEXT: s_or_b32 s10, s10, s20
+; GFX1250-NEXT: s_or_b32 s8, s8, s16
+; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s6
+; GFX1250-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9
+; GFX1250-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b128 v[8:9], v[0:3], off
; GFX1250-NEXT: global_store_b128 v[10:11], v[4:7], off
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 51652a0..2ae6fc2 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -117,12 +117,12 @@ define amdgpu_kernel void @sadd64rr(ptr addrspace(1) %out, i64 %a, i64 %b) {
;
; GFX1250-LABEL: sadd64rr:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -818,17 +818,17 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
;
; GFX1250-LABEL: suaddo64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[12:13], s[14:15]
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11]
; GFX1250-NEXT: s_endpgm
%uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %uadd, 0
@@ -1096,12 +1096,12 @@ define amdgpu_kernel void @ssub64rr(ptr addrspace(1) %out, i64 %a, i64 %b) {
;
; GFX1250-LABEL: ssub64rr:
; GFX1250: ; %bb.0: ; %entry
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX1250-NEXT: s_sub_nc_u64 s[2:3], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1798,17 +1798,17 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
;
; GFX1250-LABEL: susubo64:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
-; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT: s_sub_nc_u64 s[0:1], s[12:13], s[14:15]
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[10:11]
; GFX1250-NEXT: s_endpgm
%usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue { i64, i1 } %usub, 0
@@ -3099,70 +3099,70 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
;
; GFX1250-LABEL: sudiv64:
; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_or_b64 s[6:7], s[2:3], s[4:5]
+; GFX1250-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_b64 s[6:7], s[6:7], 0xffffffff00000000
-; GFX1250-NEXT: s_cmp_lg_u64 s[6:7], 0
+; GFX1250-NEXT: s_and_b64 s[4:5], s[4:5], 0xffffffff00000000
+; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_cvt_f32_u32 s6, s4
-; GFX1250-NEXT: s_cvt_f32_u32 s7, s5
-; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[4:5]
+; GFX1250-NEXT: s_cvt_f32_u32 s4, s6
+; GFX1250-NEXT: s_cvt_f32_u32 s5, s7
+; GFX1250-NEXT: s_sub_nc_u64 s[10:11], 0, s[6:7]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_fmac_f32 s6, s7, 0x4f800000
-; GFX1250-NEXT: v_s_rcp_f32 s6, s6
+; GFX1250-NEXT: s_fmac_f32 s4, s5, 0x4f800000
+; GFX1250-NEXT: v_s_rcp_f32 s4, s4
; GFX1250-NEXT: s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_mul_f32 s6, s6, 0x5f7ffffc
-; GFX1250-NEXT: s_mul_f32 s7, s6, 0x2f800000
+; GFX1250-NEXT: s_mul_f32 s4, s4, 0x5f7ffffc
+; GFX1250-NEXT: s_mul_f32 s5, s4, 0x2f800000
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_trunc_f32 s7, s7
-; GFX1250-NEXT: s_fmac_f32 s6, s7, 0xcf800000
-; GFX1250-NEXT: s_cvt_u32_f32 s9, s7
-; GFX1250-NEXT: s_mov_b32 s7, 0
+; GFX1250-NEXT: s_trunc_f32 s5, s5
+; GFX1250-NEXT: s_fmac_f32 s4, s5, 0xcf800000
+; GFX1250-NEXT: s_cvt_u32_f32 s9, s5
+; GFX1250-NEXT: s_mov_b32 s5, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT: s_cvt_u32_f32 s8, s6
+; GFX1250-NEXT: s_cvt_u32_f32 s8, s4
; GFX1250-NEXT: s_mul_u64 s[12:13], s[10:11], s[8:9]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s15, s8, s13
; GFX1250-NEXT: s_mul_i32 s14, s8, s13
-; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s12
+; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s12
; GFX1250-NEXT: s_mul_i32 s17, s9, s12
-; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[6:7], s[14:15]
+; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[4:5], s[14:15]
; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s12
; GFX1250-NEXT: s_mul_hi_u32 s18, s9, s13
-; GFX1250-NEXT: s_add_co_u32 s6, s14, s17
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s15, s16
+; GFX1250-NEXT: s_add_co_u32 s4, s14, s17
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s15, s16
; GFX1250-NEXT: s_mul_i32 s12, s9, s13
; GFX1250-NEXT: s_add_co_ci_u32 s13, s18, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s12
-; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT: s_cselect_b32 s4, -1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s4, 0
; GFX1250-NEXT: s_add_co_ci_u32 s9, s9, s13
; GFX1250-NEXT: s_mul_u64 s[10:11], s[10:11], s[8:9]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s13, s8, s11
; GFX1250-NEXT: s_mul_i32 s12, s8, s11
-; GFX1250-NEXT: s_mul_hi_u32 s6, s8, s10
+; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s10
; GFX1250-NEXT: s_mul_i32 s15, s9, s10
-; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13]
; GFX1250-NEXT: s_mul_hi_u32 s14, s9, s10
; GFX1250-NEXT: s_mul_hi_u32 s16, s9, s11
-; GFX1250-NEXT: s_add_co_u32 s6, s12, s15
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s13, s14
+; GFX1250-NEXT: s_add_co_u32 s4, s12, s15
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s13, s14
; GFX1250-NEXT: s_mul_i32 s10, s9, s11
; GFX1250-NEXT: s_add_co_ci_u32 s11, s16, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[6:7], s[10:11]
+; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[4:5], s[10:11]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s10
; GFX1250-NEXT: s_cselect_b32 s10, -1, 0
-; GFX1250-NEXT: s_mul_hi_u32 s6, s2, s8
+; GFX1250-NEXT: s_mul_hi_u32 s4, s2, s8
; GFX1250-NEXT: s_cmp_lg_u32 s10, 0
; GFX1250-NEXT: s_mul_hi_u32 s12, s3, s8
; GFX1250-NEXT: s_add_co_ci_u32 s10, s9, s11
@@ -3170,33 +3170,33 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_mul_hi_u32 s9, s2, s10
; GFX1250-NEXT: s_mul_i32 s8, s2, s10
; GFX1250-NEXT: s_mul_hi_u32 s13, s3, s10
-; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[8:9]
+; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[8:9]
; GFX1250-NEXT: s_mul_i32 s10, s3, s10
-; GFX1250-NEXT: s_add_co_u32 s6, s8, s11
-; GFX1250-NEXT: s_add_co_ci_u32 s6, s9, s12
+; GFX1250-NEXT: s_add_co_u32 s4, s8, s11
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s9, s12
; GFX1250-NEXT: s_add_co_ci_u32 s11, s13, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[6:7], s[10:11]
+; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[10:11]
; GFX1250-NEXT: s_and_b64 s[10:11], s[8:9], 0xffffffff00000000
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_or_b32 s10, s10, s8
-; GFX1250-NEXT: s_mul_u64 s[8:9], s[4:5], s[10:11]
+; GFX1250-NEXT: s_mul_u64 s[8:9], s[6:7], s[10:11]
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_sub_co_u32 s6, s2, s8
+; GFX1250-NEXT: s_sub_co_u32 s4, s2, s8
; GFX1250-NEXT: s_cselect_b32 s8, -1, 0
; GFX1250-NEXT: s_sub_co_i32 s12, s3, s9
; GFX1250-NEXT: s_cmp_lg_u32 s8, 0
-; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s5
-; GFX1250-NEXT: s_sub_co_u32 s13, s6, s4
+; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s7
+; GFX1250-NEXT: s_sub_co_u32 s13, s4, s6
; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s14, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s12, s5
+; GFX1250-NEXT: s_cmp_ge_u32 s12, s7
; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s13, s4
+; GFX1250-NEXT: s_cmp_ge_u32 s13, s6
; GFX1250-NEXT: s_cselect_b32 s15, -1, 0
-; GFX1250-NEXT: s_cmp_eq_u32 s12, s5
+; GFX1250-NEXT: s_cmp_eq_u32 s12, s7
; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[10:11], 1
; GFX1250-NEXT: s_cselect_b32 s16, s15, s14
; GFX1250-NEXT: s_add_nc_u64 s[14:15], s[10:11], 2
@@ -3206,20 +3206,20 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_cmp_lg_u32 s8, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s3, s3, s9
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_ge_u32 s3, s5
+; GFX1250-NEXT: s_cmp_ge_u32 s3, s7
; GFX1250-NEXT: s_cselect_b32 s8, -1, 0
-; GFX1250-NEXT: s_cmp_ge_u32 s6, s4
-; GFX1250-NEXT: s_cselect_b32 s6, -1, 0
-; GFX1250-NEXT: s_cmp_eq_u32 s3, s5
-; GFX1250-NEXT: s_cselect_b32 s3, s6, s8
+; GFX1250-NEXT: s_cmp_ge_u32 s4, s6
+; GFX1250-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT: s_cmp_eq_u32 s3, s7
+; GFX1250-NEXT: s_cselect_b32 s3, s4, s8
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-NEXT: s_cselect_b32 s9, s13, s11
; GFX1250-NEXT: s_cselect_b32 s8, s12, s10
; GFX1250-NEXT: s_cbranch_execnz .LBB16_3
; GFX1250-NEXT: .LBB16_2:
-; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s4
-; GFX1250-NEXT: s_sub_co_i32 s5, 0, s4
+; GFX1250-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX1250-NEXT: s_sub_co_i32 s4, 0, s6
; GFX1250-NEXT: s_mov_b32 s9, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
; GFX1250-NEXT: v_rcp_iflag_f32_e32 v0, v0
@@ -3228,23 +3228,23 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX1250-NEXT: v_readfirstlane_b32 s3, v0
-; GFX1250-NEXT: s_mul_i32 s5, s5, s3
+; GFX1250-NEXT: s_mul_i32 s4, s4, s3
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_mul_hi_u32 s5, s3, s5
-; GFX1250-NEXT: s_add_co_i32 s3, s3, s5
+; GFX1250-NEXT: s_mul_hi_u32 s4, s3, s4
+; GFX1250-NEXT: s_add_co_i32 s3, s3, s4
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s3, s2, s3
-; GFX1250-NEXT: s_mul_i32 s5, s3, s4
+; GFX1250-NEXT: s_mul_i32 s4, s3, s6
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_sub_co_i32 s2, s2, s5
-; GFX1250-NEXT: s_add_co_i32 s5, s3, 1
-; GFX1250-NEXT: s_sub_co_i32 s6, s2, s4
-; GFX1250-NEXT: s_cmp_ge_u32 s2, s4
-; GFX1250-NEXT: s_cselect_b32 s3, s5, s3
-; GFX1250-NEXT: s_cselect_b32 s2, s6, s2
-; GFX1250-NEXT: s_add_co_i32 s5, s3, 1
-; GFX1250-NEXT: s_cmp_ge_u32 s2, s4
-; GFX1250-NEXT: s_cselect_b32 s8, s5, s3
+; GFX1250-NEXT: s_sub_co_i32 s2, s2, s4
+; GFX1250-NEXT: s_add_co_i32 s4, s3, 1
+; GFX1250-NEXT: s_sub_co_i32 s5, s2, s6
+; GFX1250-NEXT: s_cmp_ge_u32 s2, s6
+; GFX1250-NEXT: s_cselect_b32 s3, s4, s3
+; GFX1250-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-NEXT: s_add_co_i32 s4, s3, 1
+; GFX1250-NEXT: s_cmp_ge_u32 s2, s6
+; GFX1250-NEXT: s_cselect_b32 s8, s4, s3
; GFX1250-NEXT: .LBB16_3:
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
index 029aa39..c475efb 100644
--- a/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalesce-copy-to-agpr-to-av-registers.mir
@@ -128,13 +128,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0
%2.sub2_sub3:areg_128 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -153,13 +153,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY2]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY2]]
; CHECK-NEXT: SI_RETURN
%0:vreg_64 = COPY $vgpr0_vgpr1
%1:vreg_64 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0
%2.sub2_sub3:areg_128_align2 = COPY %1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -398,14 +398,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0
%1.sub1:areg_128 = COPY %0
%1.sub2:areg_128 = COPY %0
%1.sub3:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -425,14 +425,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]]
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0
%1.sub1:areg_128_align2 = COPY %0
%1.sub2:areg_128_align2 = COPY %0
%1.sub3:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -504,7 +504,7 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_64 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0
; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, [[COPY1]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VS_64_with_sub1 */, [[COPY]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, [[COPY]]
; CHECK-NEXT: SI_RETURN
%0:vgpr_32 = COPY $vgpr0
undef %1.sub0:areg_64 = COPY %0
@@ -512,7 +512,7 @@ body: |
undef %2.sub0:vreg_64 = COPY %0
%2.sub1:vreg_64 = COPY %0
INLINEASM &"; use $0", 0 /* attdialect */, 4325385 /* reguse:AReg_64 */, killed %1
- INLINEASM &"; use $0", 0 /* attdialect */, 3670025 /* reguse:VReg_64 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 3735561 /* reguse:VReg_64 */, killed %2
SI_RETURN
...
@@ -641,13 +641,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 =COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -668,13 +668,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub1
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_128 =COPY $vgpr0_vgpr1
%0.sub1:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0
%2.sub2_sub3:areg_128_align2 = COPY %0.sub1
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -890,14 +890,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128 = COPY %0.sub0
%1.sub1:areg_128 = COPY %0.sub0
%1.sub2:areg_128 = COPY %0.sub0
%1.sub3:areg_128 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %1
SI_RETURN
...
@@ -917,14 +917,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:areg_128_align2 = COPY [[COPY]].sub0
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:areg_128_align2 = COPY [[COPY]].sub0
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0:vreg_64 = COPY $vgpr0
undef %1.sub0:areg_128_align2 = COPY %0.sub0
%1.sub1:areg_128_align2 = COPY %0.sub0
%1.sub2:areg_128_align2 = COPY %0.sub0
%1.sub3:areg_128_align2 = COPY %0.sub0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %1
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %1
SI_RETURN
...
@@ -1051,13 +1051,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1076,13 +1076,13 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]].sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[COPY]].sub0_sub1
; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2_sub3:areg_128_align2 = COPY [[COPY]].sub2_sub3
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
undef %0.sub0_sub1:vreg_128_align2 = COPY $vgpr0_vgpr1
%0.sub2_sub3:vreg_128_align2 = COPY $vgpr2_vgpr3
undef %2.sub0_sub1:areg_128_align2 = COPY %0.sub0_sub1
%2.sub2_sub3:areg_128_align2 = COPY %0.sub2_sub3
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
@@ -1358,11 +1358,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8323081 /* reguse:AReg_128 */, killed %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 8847369 /* reguse:AReg_128 */, killed %2
SI_RETURN
...
@@ -1379,11 +1379,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: [[COPY1:%[0-9]+]]:areg_128_align2 = COPY [[COPY]]
- ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY1]]
+ ; CHECK-NEXT: INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY1]]
; CHECK-NEXT: SI_RETURN
%0:vreg_128_align2 = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%2:areg_128_align2 = COPY %0
- INLINEASM &"; use $0", 0 /* attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %2
+ INLINEASM &"; use $0", 0 /* attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %2
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/ds_write2.ll b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
index be60a00..0cae0e5 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_write2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_write2.ll
@@ -705,12 +705,13 @@ define amdgpu_kernel void @write2_ptr_subreg_arg_two_val_f32(ptr addrspace(1) %C
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_load_b32 v1, v0, s[0:1] scale_offset
-; GFX1250-NEXT: global_load_b32 v0, v0, s[2:3] scale_offset
-; GFX1250-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
+; GFX1250-NEXT: global_load_b32 v2, v0, s[2:3] scale_offset
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s5
; GFX1250-NEXT: s_wait_loadcnt 0x1
-; GFX1250-NEXT: ds_store_b32 v2, v1 offset:32
+; GFX1250-NEXT: ds_store_b32 v0, v1 offset:32
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: ds_store_b32 v3, v0 offset:32
+; GFX1250-NEXT: ds_store_b32 v3, v2 offset:32
; GFX1250-NEXT: s_endpgm
%x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
%in0.gep = getelementptr float, ptr addrspace(1) %in0, i32 %x.i
@@ -1282,14 +1283,14 @@ define amdgpu_kernel void @simple_write2_v4f32_superreg_align4(ptr addrspace(3)
;
; GFX1250-LABEL: simple_write2_v4f32_superreg_align4:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
-; GFX1250-NEXT: s_wait_xcnt 0x0
-; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0
+; GFX1250-NEXT: s_clause 0x1
+; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x8
+; GFX1250-NEXT: s_load_b32 s8, s[4:5], 0x0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX1250-NEXT: s_load_b128 s[0:3], s[6:7], 0x0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s4
+; GFX1250-NEXT: v_lshl_add_u32 v0, v0, 4, s8
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
; GFX1250-NEXT: v_dual_mov_b32 v3, s0 :: v_dual_mov_b32 v4, s1
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 1e7855c..eefc781 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -541,11 +541,10 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -570,9 +569,8 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: s_clause 0x1
@@ -586,14 +584,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -618,10 +615,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -727,13 +723,12 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -754,9 +749,8 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: s_clause 0x1
@@ -770,8 +764,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_rtn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
@@ -780,7 +773,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -805,10 +798,9 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
@@ -917,11 +909,10 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
@@ -943,9 +934,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
@@ -953,15 +943,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3
@@ -982,10 +971,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -1069,11 +1057,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1
; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3
@@ -1094,9 +1080,8 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off
@@ -1104,8 +1089,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
;
; GFX1250-GISEL-LABEL: flat_xchg_saddr_i64_nortn_neg128:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -1115,7 +1099,7 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3
; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3
@@ -1136,10 +1120,9 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off
; GFX1250-GISEL-NEXT: s_endpgm
@@ -1400,11 +1383,10 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
@@ -1429,9 +1411,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
@@ -1445,14 +1426,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
;
; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn:
; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14
; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
@@ -1477,10 +1457,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
@@ -1590,13 +1569,12 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5
; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1250-SDAG-NEXT:
s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -1617,9 +1595,8 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1633,8 +1610,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -1643,7 +1619,7 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -1668,10 +1644,9 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2 ; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -1784,11 +1759,10 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; 
GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -1810,9 +1784,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1823,15 +1796,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3 @@ -1852,10 +1824,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2 ; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -1950,11 +1921,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 
0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3 @@ -1975,9 +1944,8 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -1988,8 +1956,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_add_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -1999,7 +1966,7 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3 @@ -2020,10 +1987,9 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2295,11 +2261,10 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; 
implicit-def: $vgpr0_vgpr1 @@ -2324,9 +2289,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2340,14 +2304,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -2372,10 +2335,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2 ; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2487,13 +2449,12 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -2514,9 +2475,8 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2530,8 +2490,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -2540,7 +2499,7 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -2565,10 +2524,9 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2 ; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2683,11 +2641,10 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -2709,9 +2666,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2722,15 +2678,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3 @@ -2751,10 +2706,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2 ; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -2851,11 +2805,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3 @@ -2876,9 +2828,8 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -2889,8 +2840,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_sub_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -2900,7 +2850,7 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3 @@ -2921,10 +2871,9 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2 ; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3198,11 +3147,10 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -3227,9 +3175,8 @@ define amdgpu_ps 
<2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3244,14 +3191,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -3276,10 +3222,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2 ; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3390,13 +3335,12 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -3417,9 +3361,8 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3434,8 +3377,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -3444,7 +3386,7 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -3469,10 +3411,9 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2 ; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3586,11 +3527,10 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: 
v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -3612,9 +3552,8 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3626,15 +3565,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3 @@ -3655,10 +3593,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2 ; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -3754,11 +3691,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3 @@ -3779,9 +3714,8 @@ define 
amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -3793,8 +3727,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_and_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -3804,7 +3737,7 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3 @@ -3825,10 +3758,9 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2 ; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4101,11 +4033,10 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -4130,9 +4061,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; 
GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4147,14 +4077,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -4179,10 +4108,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2 ; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4293,13 +4221,12 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3 ; 
GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -4320,9 +4247,8 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4337,8 +4263,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -4347,7 +4272,7 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -4372,10 +4297,9 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2 ; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4489,11 +4413,10 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; 
GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -4515,9 +4438,8 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4529,15 +4451,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3 @@ -4558,10 +4479,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2 ; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -4657,11 +4577,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3 @@ -4682,9 +4600,8 @@ define amdgpu_ps void 
@flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -4696,8 +4613,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_or_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -4707,7 +4623,7 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3 @@ -4728,10 +4644,9 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2 ; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5004,11 +4919,10 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -5033,9 +4947,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: 
s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5050,14 +4963,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5082,10 +4994,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2 ; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5196,13 +5107,12 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow 
@@ -5223,9 +5133,8 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5240,8 +5149,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -5250,7 +5158,7 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5275,10 +5183,9 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2 ; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5392,11 +5299,10 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ 
-5418,9 +5324,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5432,15 +5337,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3 @@ -5461,10 +5365,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2 ; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5560,11 +5463,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3 @@ -5585,9 +5486,8 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; 
GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5599,8 +5499,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_xor_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -5610,7 +5509,7 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3 @@ -5631,10 +5530,9 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2 ; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -5877,11 +5775,10 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -5902,9 +5799,8 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: 
s_cbranch_execz .LBB58_2 ; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -5918,14 +5814,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -5946,10 +5841,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2 ; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6061,13 +5955,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -6084,9 +5977,8 @@ define amdgpu_ps <2 x float> 
@flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6100,8 +5992,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -6110,7 +6001,7 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6131,10 +6022,9 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2 ; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6249,11 +6139,10 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -6272,9 +6161,8 @@ define amdgpu_ps void 
@flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6285,15 +6173,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3 @@ -6311,10 +6198,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2 ; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6409,11 +6295,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3 @@ -6431,9 +6315,8 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6444,8 +6327,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_max_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -6455,7 +6337,7 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3 @@ -6473,10 +6355,9 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2 ; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6718,11 +6599,10 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -6743,9 +6623,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-SDAG-NEXT: .LBB66_4: ; 
%atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6759,14 +6638,13 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6787,10 +6665,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2 ; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -6902,13 +6779,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -6925,9 +6801,8 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; 
GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -6941,8 +6816,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -6951,7 +6825,7 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -6972,10 +6846,9 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2 ; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7090,11 +6963,10 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -7113,9 +6985,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; 
GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7126,15 +6997,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3 @@ -7152,10 +7022,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2 ; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7250,11 +7119,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3 @@ -7272,9 +7139,8 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2 ; 
GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7285,8 +7151,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_min_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -7296,7 +7161,7 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3 @@ -7314,10 +7179,9 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2 ; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7559,11 +7423,10 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -7584,9 +7447,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 
s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7600,14 +7462,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -7628,10 +7489,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2 ; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7743,13 +7603,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -7766,9 +7625,8 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7782,8 +7640,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -7792,7 +7649,7 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -7813,10 +7670,9 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2 ; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -7931,11 +7787,10 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -7954,9 +7809,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; 
GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -7967,15 +7821,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3 @@ -7993,10 +7846,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2 ; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8091,11 +7943,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3 @@ -8113,9 +7963,8 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8126,8 +7975,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; ; GFX1250-GISEL-LABEL: flat_umax_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -8137,7 +7985,7 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3 @@ -8155,10 +8003,9 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2 ; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8400,11 +8247,10 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -8425,9 +8271,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: 
v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8441,14 +8286,13 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -8469,10 +8313,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2 ; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8584,13 +8427,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -8607,9 +8449,8 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2 ; 
GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8623,8 +8464,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -8633,7 +8473,7 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -8654,10 +8494,9 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2 ; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8772,11 +8611,10 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -8795,9 +8633,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2 ; 
GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8808,15 +8645,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3 @@ -8834,10 +8670,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2 ; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -8932,11 +8767,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3 @@ -8954,9 +8787,8 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -8967,8 +8799,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; ; GFX1250-GISEL-LABEL: flat_umin_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -8978,7 +8809,7 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3 @@ -8996,10 +8827,9 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2 ; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9281,12 +9111,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -9311,9 +9140,8 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; 
GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off @@ -9328,15 +9156,14 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -9361,10 +9188,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 % ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2 ; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9485,13 +9311,12 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -9512,9 +9337,8 @@ define amdgpu_ps <2 x 
float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off @@ -9529,10 +9353,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v10, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo @@ -9540,7 +9363,7 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v3, v10 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v3 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -9565,10 +9388,9 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2 ; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9690,13 +9512,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3 +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | 
instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -9718,9 +9539,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off @@ -9732,15 +9552,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3 @@ -9761,10 +9581,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2 ; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -9869,11 +9688,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; 
GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3 @@ -9894,9 +9711,8 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off @@ -9908,18 +9724,18 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; ; GFX1250-GISEL-LABEL: flat_cmpxchg_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, src_flat_scratch_base_hi :: v_dual_mov_b32 v8, v1 ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v1, v3, v5 bitop3:0x14 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3 @@ -9940,10 +9756,9 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2 ; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -10188,11 +10003,10 @@ 
define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -10214,10 +10028,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10233,14 +10046,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -10262,18 +10074,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2 ; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off 
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -10386,13 +10197,12 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -10410,10 +10220,9 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10429,8 +10238,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -10439,7 +10247,7 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; 
GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -10461,18 +10269,17 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2 ; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -10588,11 +10395,10 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -10610,9 +10416,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10625,15 +10430,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: 
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3 @@ -10650,17 +10454,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2 ; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -10754,11 +10557,9 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3 @@ -10775,9 +10576,8 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -10790,8 +10590,7 @@ define amdgpu_ps void 
@flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_inc_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -10801,7 +10600,7 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3 @@ -10818,17 +10617,16 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2 ; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1] ; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off ; GFX1250-GISEL-NEXT: s_endpgm @@ -11064,11 +10862,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -11090,10 +10887,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private -; 
GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -11112,14 +10908,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -11141,11 +10936,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2 ; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11268,13 +11062,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3 ; 
GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow @@ -11292,10 +11085,9 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off @@ -11314,8 +11106,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_rtn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v8, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3 @@ -11324,7 +11115,7 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v7, v8 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, src_flat_scratch_base_hi, v7 bitop3:0x14 ; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo @@ -11346,11 +11137,10 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3 ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2 ; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v6, v0 +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v6 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11476,11 +11266,10 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo +; GFX1250-SDAG-NEXT: v_xor_b32_e32 
v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 @@ -11498,9 +11287,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -11516,15 +11304,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3 @@ -11541,10 +11328,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2 ; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11648,11 +11434,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1] ; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1 ; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, src_flat_scratch_base_hi, v1 ; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4 ; 
GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3 @@ -11669,9 +11453,8 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1] -; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0 +; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, src_flat_scratch_base_lo, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -11687,8 +11470,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; ; GFX1250-GISEL-LABEL: flat_dec_saddr_i64_nortn_neg128: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, src_flat_scratch_base_hi :: v_dual_mov_b32 v4, v1 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) @@ -11698,7 +11480,7 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, v3, v6 +; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, src_flat_scratch_base_hi, v3 ; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1 ; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3 @@ -11715,10 +11497,9 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo ; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0 ; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2 ; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_sub_nc_u32_e32 v0, v2, v0 +; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -11834,12 +11615,10 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_3 ; GFX1250-SDAG-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: 
s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB110_4 ; GFX1250-SDAG-NEXT: ; %bb.2: ; %atomicrmw.global @@ -11855,9 +11634,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB110_4: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB110_5: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -11891,10 +11668,9 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_6 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB110_3 @@ -11910,9 +11686,7 @@ define double @flat_atomic_fadd_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB110_5 ; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12060,12 +11834,10 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB111_2: ; %atomicrmw.phi ; GFX1250-SDAG-NEXT: s_set_pc_i64 s[30:31] ; GFX1250-SDAG-NEXT: .LBB111_3: ; %atomicrmw.check.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_mov_b32 s2, -1 ; GFX1250-SDAG-NEXT: s_cbranch_vccz .LBB111_5 @@ -12079,9 +11851,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2 ; GFX1250-SDAG-NEXT: s_cbranch_vccnz .LBB111_7 ; GFX1250-SDAG-NEXT: ; %bb.6: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: 
s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12112,9 +11882,8 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s1, s3 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_6 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %atomicrmw.check.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-GISEL-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s2, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB111_3 @@ -12130,9 +11899,7 @@ define void @flat_atomic_fadd_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB111_5 ; GFX1250-GISEL-NEXT: ; %bb.4: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-GISEL-NEXT: s_cselect_b32 s2, s2, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s2 @@ -12261,9 +12028,8 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12279,10 +12045,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB112_2: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB112_3: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12301,10 +12066,9 @@ define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB112_2 @@ -12320,10 +12084,9 @@ 
define double @flat_atomic_fmax_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB112_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12413,9 +12176,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12434,11 +12196,10 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_storecnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB113_2 ; GFX1250-SDAG-NEXT: .LBB113_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 -; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12454,9 +12215,8 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB113_2 @@ -12473,10 +12233,9 @@ define void @flat_atomic_fmax_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB113_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12562,9 +12321,8 @@ define double 
@flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12580,10 +12338,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: .LBB114_2: ; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-SDAG-NEXT: .LBB114_3: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo -; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo ; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 +; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12602,10 +12359,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB114_2 @@ -12621,10 +12377,9 @@ define double @flat_atomic_fmin_f64_saddr_rtn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB114_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 @@ -12714,9 +12469,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_add_nc_u64 s[0:1], s[0:1], 0x50 -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, s2 +; GFX1250-SDAG-NEXT: s_xor_b32 s2, s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s2, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) @@ -12735,11 +12489,10 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-SDAG-NEXT: 
s_wait_storecnt_dscnt 0x0 ; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB115_2 ; GFX1250-SDAG-NEXT: .LBB115_4: ; %atomicrmw.private -; GFX1250-SDAG-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, src_flat_scratch_base_lo +; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-SDAG-NEXT: s_sub_co_i32 s2, s0, s2 -; GFX1250-SDAG-NEXT: s_cmp_lg_u64 s[0:1], 0 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, s2, -1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 @@ -12755,9 +12508,8 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_add_co_u32 s2, s0, 0x50 ; GFX1250-GISEL-NEXT: s_add_co_ci_u32 s3, s1, 0 -; GFX1250-GISEL-NEXT: s_mov_b32 s4, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, s4 +; GFX1250-GISEL-NEXT: s_xor_b32 s4, s3, src_flat_scratch_base_hi ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s4, 0x4000000 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1 ; GFX1250-GISEL-NEXT: s_cbranch_scc0 .LBB115_2 @@ -12774,10 +12526,9 @@ define void @flat_atomic_fmin_f64_saddr_nortn(ptr inreg %ptr, double %data) { ; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s0, 0 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB115_4 ; GFX1250-GISEL-NEXT: ; %bb.3: ; %atomicrmw.private -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo -; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] -; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, s0 +; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s2, src_flat_scratch_base_lo ; GFX1250-GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1250-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-GISEL-NEXT: s_cselect_b32 s0, s0, -1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[2:3], off, s0 ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll index 2079543..b5b2655 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll @@ -2179,6 +2179,7 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre ; GFX1250-SDAG-NEXT: flat_load_b32 v1, v0, s[4:5] scope:SCOPE_SYS ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s0, 0x400 +; GFX1250-SDAG-NEXT: ; kill: killed $sgpr4_sgpr5 ; GFX1250-SDAG-NEXT: s_cbranch_scc0 .LBB117_1 ; GFX1250-SDAG-NEXT: ; %bb.2: ; %bb2 ; GFX1250-SDAG-NEXT: s_endpgm @@ -2190,15 +2191,16 @@ define amdgpu_ps void @flat_addr_64bit_lsr_iv_multiload(ptr inreg %arg, ptr inre ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[0:1] ; GFX1250-GISEL-NEXT: .LBB117_1: ; %bb3 ; GFX1250-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2 ; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo ; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 4, v[2:3] +; GFX1250-GISEL-NEXT: ; kill: killed $vgpr4 killed $vgpr5 +; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0 +; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS +; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GFX1250-GISEL-NEXT: flat_load_b32 v6, v[4:5] scope:SCOPE_SYS -; 
GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: flat_load_b32 v4, v[4:5] scope:SCOPE_SYS ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 ; GFX1250-GISEL-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0x400, v2 ; GFX1250-GISEL-NEXT: s_cbranch_vccz .LBB117_1 diff --git a/llvm/test/CodeGen/AMDGPU/fmax3.ll b/llvm/test/CodeGen/AMDGPU/fmax3.ll index 4827f75..5e6de6d 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax3.ll @@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmax3_olt_0_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmax3_olt_0_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max3_num_f32 v0, v0, v1, v2 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmax3_olt_1_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmax3_olt_1_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; 
GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max3_num_f32 v0, v2, v0, v1 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmax3_olt_0_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmax3_olt_0_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmax3_olt_0_f16: ; 
GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v0, v1, v2 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmax3_olt_1_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmax3_olt_1_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; 
GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_max3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmax3_olt_1_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_max3_num_f16 v0, v2, v0, v1 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 diff --git a/llvm/test/CodeGen/AMDGPU/fmin3.ll 
b/llvm/test/CodeGen/AMDGPU/fmin3.ll index 6dfefd8..6a6f232 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin3.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin3.ll @@ -162,32 +162,32 @@ define amdgpu_kernel void @test_fmin3_olt_0_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_0_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_min3_num_f32 v0, v0, v1, v2 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -352,32 +352,32 @@ define amdgpu_kernel void @test_fmin3_olt_1_f32(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_1_f32: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 -; GFX1250-NEXT: s_mov_b32 s22, s10 -; GFX1250-NEXT: s_mov_b32 s23, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 +; GFX1250-NEXT: s_mov_b32 s22, s2 +; GFX1250-NEXT: s_mov_b32 s23, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: s_mov_b32 s20, s6 -; GFX1250-NEXT: s_mov_b32 s21, s7 -; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; 
GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: s_mov_b32 s20, s14 +; GFX1250-NEXT: s_mov_b32 s21, s15 +; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b32 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_min3_num_f32 v0, v2, v0, v1 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile float, ptr addrspace(1) %aptr, align 4 %b = load volatile float, ptr addrspace(1) %bptr, align 4 @@ -609,62 +609,62 @@ define amdgpu_kernel void @test_fmin3_olt_0_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmin3_olt_0_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmin3_olt_0_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: 
s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v0, v1, v2 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -897,62 +897,62 @@ define amdgpu_kernel void @test_fmin3_olt_1_f16(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-TRUE16-LABEL: test_fmin3_olt_1_f16: ; GFX1250-TRUE16: ; %bb.0: -; GFX1250-TRUE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-TRUE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-TRUE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-TRUE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-TRUE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-TRUE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-TRUE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-TRUE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-TRUE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-TRUE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-TRUE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-TRUE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-TRUE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-TRUE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-TRUE16-NEXT: s_mov_b32 s17, s13 +; 
GFX1250-TRUE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-TRUE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-TRUE16-NEXT: buffer_load_u16 v1, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v2, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-TRUE16-NEXT: buffer_load_u16 v0, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-TRUE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-TRUE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-TRUE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-TRUE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-TRUE16-NEXT: v_min3_num_f16 v0.l, v0.l, v1.l, v2.l -; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-TRUE16-NEXT: s_endpgm ; ; GFX1250-FAKE16-LABEL: test_fmin3_olt_1_f16: ; GFX1250-FAKE16: ; %bb.0: -; GFX1250-FAKE16-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-FAKE16-NEXT: s_mov_b32 s10, -1 -; GFX1250-FAKE16-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-FAKE16-NEXT: s_mov_b32 s14, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s15, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s11 -; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s10 -; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s11 +; GFX1250-FAKE16-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-FAKE16-NEXT: s_mov_b32 s2, -1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-FAKE16-NEXT: s_mov_b32 s6, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s7, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s18, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s19, s3 +; GFX1250-FAKE16-NEXT: s_mov_b32 s22, s2 +; GFX1250-FAKE16-NEXT: s_mov_b32 s23, s3 ; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s12, s2 -; GFX1250-FAKE16-NEXT: s_mov_b32 s13, s3 -; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s4 -; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s5 -; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s6 -; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s7 -; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[12:15], null scope:SCOPE_SYS +; GFX1250-FAKE16-NEXT: s_mov_b32 s4, s10 +; GFX1250-FAKE16-NEXT: s_mov_b32 s5, s11 +; GFX1250-FAKE16-NEXT: s_mov_b32 s16, s12 +; GFX1250-FAKE16-NEXT: s_mov_b32 s17, s13 +; GFX1250-FAKE16-NEXT: s_mov_b32 s20, s14 +; GFX1250-FAKE16-NEXT: s_mov_b32 s21, s15 +; GFX1250-FAKE16-NEXT: buffer_load_u16 v0, off, s[4:7], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v1, off, s[16:19], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 ; GFX1250-FAKE16-NEXT: buffer_load_u16 v2, off, s[20:23], null scope:SCOPE_SYS ; GFX1250-FAKE16-NEXT: s_wait_loadcnt 0x0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s8, s0 -; GFX1250-FAKE16-NEXT: s_mov_b32 s9, s1 +; GFX1250-FAKE16-NEXT: s_mov_b32 s0, s8 +; GFX1250-FAKE16-NEXT: s_mov_b32 s1, s9 ; GFX1250-FAKE16-NEXT: v_min3_num_f16 v0, v2, v0, v1 -; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null +; GFX1250-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null ; GFX1250-FAKE16-NEXT: s_endpgm %a = load volatile half, ptr addrspace(1) %aptr, align 2 %b = load volatile half, ptr addrspace(1) %bptr, align 2 @@ -1217,36 +1217,36 @@ define amdgpu_kernel void @test_fmin3_olt_0_f64(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_0_f64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 
-; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_xcnt 0x1 -; GFX1250-NEXT: s_mov_b32 s12, s6 -; GFX1250-NEXT: s_mov_b32 s13, s7 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s14 +; GFX1250-NEXT: s_mov_b32 s5, s15 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5] ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile double, ptr addrspace(1) %aptr, align 4 %b = load volatile double, ptr addrspace(1) %bptr, align 4 @@ -1427,36 +1427,36 @@ define amdgpu_kernel void @test_fmin3_olt_1_f64(ptr addrspace(1) %out, ptr addrs ; ; GFX1250-LABEL: test_fmin3_olt_1_f64: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s18, s10 -; GFX1250-NEXT: s_mov_b32 s19, s11 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s7, s3 +; GFX1250-NEXT: s_mov_b32 s18, s2 +; GFX1250-NEXT: s_mov_b32 s19, s3 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s12, s2 -; GFX1250-NEXT: s_mov_b32 s13, s3 -; GFX1250-NEXT: s_mov_b32 s16, s4 -; GFX1250-NEXT: s_mov_b32 s17, s5 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s10 +; GFX1250-NEXT: s_mov_b32 s5, s11 +; GFX1250-NEXT: s_mov_b32 s16, s12 +; GFX1250-NEXT: s_mov_b32 s17, s13 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[16:19], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_xcnt 0x1 -; GFX1250-NEXT: s_mov_b32 s12, s6 -; GFX1250-NEXT: s_mov_b32 s13, s7 -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: 
buffer_load_b64 v[4:5], off, s[12:15], null scope:SCOPE_SYS +; GFX1250-NEXT: s_mov_b32 s4, s14 +; GFX1250-NEXT: s_mov_b32 s5, s15 +; GFX1250-NEXT: s_mov_b32 s0, s8 +; GFX1250-NEXT: buffer_load_b64 v[4:5], off, s[4:7], null scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: s_mov_b32 s1, s9 ; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[2:3], v[2:3] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3] ; GFX1250-NEXT: v_max_num_f64_e32 v[2:3], v[4:5], v[4:5] ; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[2:3], v[0:1] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_endpgm %a = load volatile double, ptr addrspace(1) %aptr, align 4 %b = load volatile double, ptr addrspace(1) %bptr, align 4 diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll index e532dea..f807169 100644 --- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll +++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll @@ -11,22 +11,20 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG: ; %bb.0: ; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 -; GCN-SDAG-NEXT: global_load_u8 v2, v[2:3], off -; GCN-SDAG-NEXT: global_load_u8 v3, v[4:5], off -; GCN-SDAG-NEXT: global_load_u8 v0, v[0:1], off +; GCN-SDAG-NEXT: global_load_u8 v6, v[2:3], off +; GCN-SDAG-NEXT: global_load_u8 v7, v[4:5], off +; GCN-SDAG-NEXT: global_load_u8 v10, v[0:1], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x2 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 -; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v2 +; GCN-SDAG-NEXT: v_lshlrev_b16 v0, 8, v6 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x1 -; GCN-SDAG-NEXT: v_lshlrev_b16 v2, 8, v3 +; GCN-SDAG-NEXT: v_lshlrev_b16 v1, 8, v7 +; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GCN-SDAG-NEXT: v_or_b32_e32 v1, v7, v1 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1 -; GCN-SDAG-NEXT: v_or_b32_e32 v1, v3, v2 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GCN-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GCN-SDAG-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_bitop2_b32 v0, v10, v0 bitop3:0x54 +; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GCN-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-SDAG-NEXT: v_or_b32_e32 v0, v0, v1 ; GCN-SDAG-NEXT: global_store_b32 v[8:9], v0, off ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] @@ -35,13 +33,15 @@ define void @test_i8load_v4i8store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL: ; %bb.0: ; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 -; GCN-GISEL-NEXT: global_load_u8 v0, v[0:1], off -; GCN-GISEL-NEXT: global_load_u8 v1, v[2:3], off -; GCN-GISEL-NEXT: global_load_u8 v2, v[4:5], off +; GCN-GISEL-NEXT: global_load_u8 v6, v[0:1], off +; GCN-GISEL-NEXT: global_load_u8 v7, v[2:3], off +; GCN-GISEL-NEXT: global_load_u8 v10, v[4:5], off ; GCN-GISEL-NEXT: s_wait_loadcnt 0x1 -; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v1, 8, v0 +; GCN-GISEL-NEXT: s_wait_xcnt 0x2 +; GCN-GISEL-NEXT: v_lshl_or_b32 v0, v7, 8, v6 ; GCN-GISEL-NEXT: 
s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v2 :: v_dual_lshlrev_b32 v2, 24, v2 +; GCN-GISEL-NEXT: s_wait_xcnt 0x1 +; GCN-GISEL-NEXT: v_dual_lshlrev_b32 v1, 16, v10 :: v_dual_lshlrev_b32 v2, 24, v10 ; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GCN-GISEL-NEXT: v_or3_b32 v0, v0, v1, v2 ; GCN-GISEL-NEXT: global_store_b32 v[8:9], v0, off @@ -64,21 +64,21 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2 ; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 ; GCN-SDAG-NEXT: global_load_b128 v[4:7], v[0:1], off -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[2:3], off -; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 0 -; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: v_pk_add_u16 v10, v6, v2 -; GCN-SDAG-NEXT: v_pk_add_u16 v11, v7, v3 +; GCN-SDAG-NEXT: global_load_b128 v[8:11], v[2:3], off ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 12 +; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 +; GCN-SDAG-NEXT: v_pk_add_u16 v1, v6, v10 +; GCN-SDAG-NEXT: v_pk_add_u16 v12, v7, v11 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[6:7], 8 -; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v0 -; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v10 -; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v1 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 0 +; GCN-SDAG-NEXT: v_pk_add_u16 v5, v5, v9 +; GCN-SDAG-NEXT: v_lshrrev_b32_e32 v0, 16, v1 +; GCN-SDAG-NEXT: v_pk_add_u16 v4, v4, v8 ; GCN-SDAG-NEXT: s_clause 0x2 -; GCN-SDAG-NEXT: global_store_b16 v[2:3], v11, off -; GCN-SDAG-NEXT: global_store_b32 v[6:7], v10, off -; GCN-SDAG-NEXT: global_store_b64 v[8:9], v[4:5], off +; GCN-SDAG-NEXT: global_store_b16 v[2:3], v12, off +; GCN-SDAG-NEXT: global_store_b32 v[6:7], v1, off +; GCN-SDAG-NEXT: global_store_b64 v[10:11], v[4:5], off ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] ; ; GCN-GISEL-LABEL: test_v7i16_load_store: @@ -86,28 +86,29 @@ define i16 @test_v7i16_load_store(ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2 ; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 ; GCN-GISEL-NEXT: global_load_b128 v[4:7], v[0:1], off -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[2:3], off -; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 8 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 10 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12 +; GCN-GISEL-NEXT: global_load_b128 v[8:11], v[2:3], off +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], 0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 2 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 4 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[16:17], 6 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[18:19], 8 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 10 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[22:23], 12 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_pk_add_u16 v2, v6, v2 -; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v0 -; GCN-GISEL-NEXT: v_pk_add_u16 v1, v5, v1 -; GCN-GISEL-NEXT: v_pk_add_u16 v3, v7, v3 +; GCN-GISEL-NEXT: v_pk_add_u16 v1, v6, v10 +; GCN-GISEL-NEXT: v_pk_add_u16 v4, v4, v8 +; GCN-GISEL-NEXT: v_pk_add_u16 v5, v5, v9 +; GCN-GISEL-NEXT: v_pk_add_u16 v6, v7, v11 ; GCN-GISEL-NEXT: s_clause 0x6 -; GCN-GISEL-NEXT: global_store_b16 v[8:9], v4, off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[10:11], v4, off -; GCN-GISEL-NEXT: global_store_b16 v[12:13], v1, off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[14:15], v1, off -; GCN-GISEL-NEXT: global_store_b16 v[16:17], v2, 
off -; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[18:19], v2, off -; GCN-GISEL-NEXT: global_store_b16 v[20:21], v3, off -; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v2 +; GCN-GISEL-NEXT: global_store_b16 v[2:3], v4, off +; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[12:13], v4, off +; GCN-GISEL-NEXT: global_store_b16 v[14:15], v5, off +; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[16:17], v5, off +; GCN-GISEL-NEXT: global_store_b16 v[18:19], v1, off +; GCN-GISEL-NEXT: global_store_d16_hi_b16 v[20:21], v1, off +; GCN-GISEL-NEXT: global_store_b16 v[22:23], v6, off +; GCN-GISEL-NEXT: v_lshrrev_b32_e32 v0, 16, v1 ; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31] %vec1 = load <7 x i16>, ptr addrspace(1) %ptr1 %insert = insertelement <7 x i16> %vec1, i16 20, i32 4 @@ -253,8 +254,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: global_load_b128 v[22:25], v[0:1], off offset:32 ; GCN-SDAG-NEXT: global_load_b128 v[26:29], v[0:1], off offset:16 ; GCN-SDAG-NEXT: global_load_b128 v[30:33], v[0:1], off -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64 -; GCN-SDAG-NEXT: v_mov_b64_e32 v[36:37], 0x70 +; GCN-SDAG-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], 0x70 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[48:49], 48 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[38:39], 0x60 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[50:51], 32 @@ -262,14 +263,15 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_mov_b64_e32 v[66:67], 0 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[52:53], 0x50 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[54:55], 64 -; GCN-SDAG-NEXT: v_dual_mov_b32 v34, 0xc8 :: v_dual_mov_b32 v35, 0 +; GCN-SDAG-NEXT: s_wait_xcnt 0x0 +; GCN-SDAG-NEXT: v_dual_mov_b32 v0, 0xc8 :: v_dual_mov_b32 v1, 0 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x7 -; GCN-SDAG-NEXT: global_store_b128 v[36:37], v[6:9], off +; GCN-SDAG-NEXT: global_store_b128 v[2:3], v[6:9], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x6 ; GCN-SDAG-NEXT: global_store_b128 v[38:39], v[10:13], off ; GCN-SDAG-NEXT: s_wait_loadcnt 0x5 ; GCN-SDAG-NEXT: s_wait_xcnt 0x1 -; GCN-SDAG-NEXT: v_dual_mov_b32 v36, v16 :: v_dual_mov_b32 v37, v17 +; GCN-SDAG-NEXT: v_dual_mov_b32 v2, v16 :: v_dual_mov_b32 v3, v17 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11] @@ -286,8 +288,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[6:7], v[6:7], v[6:7] ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 -; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3] -; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1] +; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37] +; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[16:17], v[16:17], v[16:17] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[14:15], 0xc8, v[14:15] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[24:25], 0x64, v[24:25] @@ -298,8 +300,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[20:21], v[20:21], v[20:21] ; GCN-SDAG-NEXT: v_add_nc_u64_e32 v[18:19], v[18:19], v[18:19] ; GCN-SDAG-NEXT: s_clause 0x1 -; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[34:37], off -; GCN-SDAG-NEXT: global_store_b128 v[54:55], v[0:3], off +; GCN-SDAG-NEXT: global_store_b128 v[52:53], v[0:3], off +; GCN-SDAG-NEXT: 
global_store_b128 v[54:55], v[34:37], off ; GCN-SDAG-NEXT: s_clause 0x7 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[10:13], off offset:96 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[6:9], off offset:112 @@ -309,7 +311,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[18:21], off offset:48 ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[30:33], off ; GCN-SDAG-NEXT: global_store_b128 v[4:5], v[26:29], off offset:16 -; GCN-SDAG-NEXT: s_wait_xcnt 0x8 +; GCN-SDAG-NEXT: s_wait_xcnt 0x9 ; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v32 :: v_dual_mov_b32 v1, v33 ; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31] ; @@ -325,7 +327,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_load_b128 v[22:25], v[0:1], off offset:48 ; GCN-GISEL-NEXT: global_load_b128 v[26:29], v[0:1], off offset:96 ; GCN-GISEL-NEXT: global_load_b128 v[30:33], v[0:1], off offset:112 -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:64 +; GCN-GISEL-NEXT: global_load_b128 v[34:37], v[0:1], off offset:64 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[38:39], 0 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[48:49], 16 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[50:51], 32 @@ -333,7 +335,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_mov_b64_e32 v[66:67], 0x60 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[68:69], 0x70 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[54:55], 64 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[34:35], 0xc8 +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], 0xc8 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[64:65], 0x50 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x6 ; GCN-GISEL-NEXT: global_store_b128 v[38:39], v[10:13], off @@ -349,7 +352,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_store_b128 v[68:69], v[30:33], off ; GCN-GISEL-NEXT: s_wait_xcnt 0x5 ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[12:13], v[12:13], v[12:13] -; GCN-GISEL-NEXT: v_mov_b64_e32 v[36:37], v[8:9] +; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], v[8:9] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[10:11], v[10:11], v[10:11] ; GCN-GISEL-NEXT: s_wait_xcnt 0x4 ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[14:15], v[14:15], v[14:15] @@ -361,8 +364,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[22:23], v[22:23], v[22:23] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[24:25], v[24:25], v[24:25] ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[0:1], v[0:1] -; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[2:3], v[2:3] +; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[48:49], v[34:35], v[34:35] +; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[50:51], v[36:37], v[36:37] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[6:7], 0xc8, v[6:7] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[8:9], v[8:9], v[8:9] ; GCN-GISEL-NEXT: s_wait_xcnt 0x1 @@ -372,8 +375,8 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[30:31], v[30:31], v[30:31] ; GCN-GISEL-NEXT: v_add_nc_u64_e32 v[32:33], v[32:33], v[32:33] ; GCN-GISEL-NEXT: s_clause 0x1 -; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[0:3], off -; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[34:37], off +; GCN-GISEL-NEXT: global_store_b128 v[54:55], v[34:37], off +; GCN-GISEL-NEXT: global_store_b128 v[64:65], v[0:3], off ; GCN-GISEL-NEXT: s_clause 0x7 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[10:13], off ; GCN-GISEL-NEXT: 
global_store_b128 v[4:5], v[14:17], off offset:16 @@ -383,7 +386,7 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[6:9], off offset:80 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[26:29], off offset:96 ; GCN-GISEL-NEXT: global_store_b128 v[4:5], v[30:33], off offset:112 -; GCN-GISEL-NEXT: s_wait_xcnt 0x9 +; GCN-GISEL-NEXT: s_wait_xcnt 0x8 ; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v12 :: v_dual_mov_b32 v1, v13 ; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31] %a = load <16 x i64>, ptr addrspace(1) %ptr_a, align 4 @@ -402,16 +405,17 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-SDAG-LABEL: test_v7i16_load_store_kernel: ; GCN-SDAG: ; %bb.0: ; GCN-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GCN-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GCN-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GCN-SDAG-NEXT: s_wait_xcnt 0x0 ; GCN-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[10:11], 8 ; GCN-SDAG-NEXT: v_mov_b64_e32 v[12:13], 0 ; GCN-SDAG-NEXT: s_wait_kmcnt 0x0 ; GCN-SDAG-NEXT: s_clause 0x1 -; GCN-SDAG-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset -; GCN-SDAG-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset +; GCN-SDAG-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset +; GCN-SDAG-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset +; GCN-SDAG-NEXT: s_wait_xcnt 0x0 +; GCN-SDAG-NEXT: v_mov_b64_e32 v[8:9], 12 ; GCN-SDAG-NEXT: s_wait_loadcnt 0x0 ; GCN-SDAG-NEXT: v_pk_add_u16 v3, v3, v7 ; GCN-SDAG-NEXT: v_pk_add_u16 v2, v2, v6 @@ -428,10 +432,9 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-GISEL-LABEL: test_v7i16_load_store_kernel: ; GCN-GISEL: ; %bb.0: ; GCN-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GCN-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GCN-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GCN-GISEL-NEXT: s_wait_xcnt 0x0 ; GCN-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[10:11], 2 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[12:13], 4 ; GCN-GISEL-NEXT: v_mov_b64_e32 v[14:15], 6 @@ -440,8 +443,10 @@ define amdgpu_kernel void @test_v7i16_load_store_kernel(ptr addrspace(1) %ptr1, ; GCN-GISEL-NEXT: v_mov_b64_e32 v[20:21], 12 ; GCN-GISEL-NEXT: s_wait_kmcnt 0x0 ; GCN-GISEL-NEXT: s_clause 0x1 -; GCN-GISEL-NEXT: global_load_b128 v[0:3], v4, s[0:1] scale_offset -; GCN-GISEL-NEXT: global_load_b128 v[4:7], v4, s[2:3] scale_offset +; GCN-GISEL-NEXT: global_load_b128 v[0:3], v8, s[0:1] scale_offset +; GCN-GISEL-NEXT: global_load_b128 v[4:7], v8, s[2:3] scale_offset +; GCN-GISEL-NEXT: s_wait_xcnt 0x0 +; GCN-GISEL-NEXT: v_mov_b64_e32 v[8:9], 0 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 ; GCN-GISEL-NEXT: v_pk_add_u16 v0, v0, v4 ; GCN-GISEL-NEXT: v_pk_add_u16 v1, v1, v5 diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir index 92836d8..63db24a 100644 --- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir +++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir @@ -486,7 +486,7 @@ body: | ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 ; CHECK-NEXT: renamable 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 - ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; CHECK-NEXT: S_ENDPGM 0 bb.0: S_NOP 0, implicit-def $agpr0 @@ -516,7 +516,7 @@ body: | S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47 S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55 S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63 - INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 + INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2 S_ENDPGM 0 ... @@ -1368,7 +1368,7 @@ body: | ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -1408,7 +1408,7 @@ body: | undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 @@ -1726,7 +1726,7 @@ body: | ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1) ; CHECK-NEXT: renamable 
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc ; CHECK-NEXT: S_BRANCH %bb.2 ; CHECK-NEXT: {{ $}} @@ -1763,7 +1763,7 @@ body: | undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1) %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 39190537 /* reguse:VReg_512_Align2 */, %4 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40239113 /* reguse:VReg_512_Align2 */, %4 S_CBRANCH_VCCNZ %bb.1, implicit $vcc S_BRANCH %bb.2 diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll index 9cbdc38..5b3e486 100644 --- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll +++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll @@ -8,16 +8,16 @@ define amdgpu_kernel void @s_input_output_i128() { ; GFX908-LABEL: name: s_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: s_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9633802 /* regdef:SGPR_128 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10682378 /* regdef:SGPR_128 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:sgpr_128 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9633801 /* reguse:SGPR_128 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10682377 /* reguse:SGPR_128 */, [[COPY]] ; GFX90A-NEXT: S_ENDPGM 0 %val = tail call i128 asm sideeffect "; def $0", "=s"() call void asm sideeffect "; use $0", "s"(i128 %val) @@ -27,16 +27,16 @@ define amdgpu_kernel void @s_input_output_i128() { 
define amdgpu_kernel void @v_input_output_i128() { ; GFX908-LABEL: name: v_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:vreg_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7798793 /* reguse:VReg_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: v_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7995401 /* reguse:VReg_128_Align2 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8257545 /* reguse:VReg_128_Align2 */, [[COPY]] ; GFX90A-NEXT: S_ENDPGM 0 %val = tail call i128 asm sideeffect "; def $0", "=v"() call void asm sideeffect "; use $0", "v"(i128 %val) @@ -47,16 +47,16 @@ define amdgpu_kernel void @a_input_output_i128() { ; GFX908-LABEL: name: a_input_output_i128 ; GFX908: bb.0 (%ir-block.0): - ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8323082 /* regdef:AReg_128 */, def %13 + ; GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8847370 /* regdef:AReg_128 */, def %13 ; GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY %13 - ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8323081 /* reguse:AReg_128 */, [[COPY]] + ; GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8847369 /* reguse:AReg_128 */, [[COPY]] ; GFX908-NEXT: S_ENDPGM 0 ; ; GFX90A-LABEL: name: a_input_output_i128 ; GFX90A: bb.0 (%ir-block.0): - ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8650762 /* regdef:AReg_128_Align2 */, def %11 + ; GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 9568266 /* regdef:AReg_128_Align2 */, def %11 ; GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY %11 - ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY]] + ; GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY]] ; GFX90A-NEXT: S_ENDPGM 0 %val = call i128 asm sideeffect "; def $0", "=a"() call void asm sideeffect "; use $0", "a"(i128 %val) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll index 90fcb51..fa97380 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.id.ll @@ -11,14 +11,11 @@ declare i32 @llvm.amdgcn.cluster.id.z() #0 define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-UNKNOWN-LABEL: test_cluster_id_x: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 
v1, v0, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-MESA3D-LABEL: test_cluster_id_x: ; CHECK-MESA3D: .amd_kernel_code_t @@ -68,7 +65,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -98,14 +95,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_x: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp9 :: v_dual_mov_b32 v1, 0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_x: ; CHECK-G-MESA3D: .amd_kernel_code_t @@ -155,7 +149,7 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -190,14 +184,11 @@ define amdgpu_kernel void @test_cluster_id_x(ptr addrspace(1) %out) { define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-UNKNOWN-LABEL: test_cluster_id_y: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 ; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-MESA3D-LABEL: test_cluster_id_y: ; CHECK-MESA3D: .amd_kernel_code_t @@ -247,7 +238,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -277,14 +268,11 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_y: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 
0x24 ; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v0, ttmp7 :: v_dual_mov_b32 v1, 0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 0 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_y: ; CHECK-G-MESA3D: .amd_kernel_code_t @@ -334,7 +322,7 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -369,16 +357,14 @@ define amdgpu_kernel void @test_cluster_id_y(ptr addrspace(1) %out) #1 { define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-UNKNOWN-LABEL: test_cluster_id_z: ; CHECK-UNKNOWN: ; %bb.0: -; CHECK-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 -; CHECK-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16 +; CHECK-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 +; CHECK-UNKNOWN-NEXT: s_wait_xcnt 0x0 +; CHECK-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16 ; CHECK-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-UNKNOWN-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 ; CHECK-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[0:1] +; CHECK-UNKNOWN-NEXT: global_store_b32 v0, v1, s[2:3] ; CHECK-UNKNOWN-NEXT: s_endpgm -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1 ; ; CHECK-MESA3D-LABEL: test_cluster_id_z: ; CHECK-MESA3D: .amd_kernel_code_t @@ -428,7 +414,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -460,16 +446,14 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; ; CHECK-G-UNKNOWN-LABEL: test_cluster_id_z: ; CHECK-G-UNKNOWN: ; %bb.0: -; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 -; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s2, ttmp7, 16 +; CHECK-G-UNKNOWN-NEXT: s_load_b64 s[2:3], s[0:1], 0x24 +; CHECK-G-UNKNOWN-NEXT: s_wait_xcnt 0x0 +; CHECK-G-UNKNOWN-NEXT: s_lshr_b32 s0, ttmp7, 16 ; CHECK-G-UNKNOWN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; CHECK-G-UNKNOWN-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s0 ; CHECK-G-UNKNOWN-NEXT: s_wait_kmcnt 0x0 -; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[0:1] +; CHECK-G-UNKNOWN-NEXT: global_store_b32 v1, v0, s[2:3] ; CHECK-G-UNKNOWN-NEXT: s_endpgm -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_X_EN: 1 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Y_EN: 0 -; CHECK-G-UNKNOWN: COMPUTE_PGM_RSRC2:TGID_Z_EN: 1 ; ; CHECK-G-MESA3D-LABEL: test_cluster_id_z: ; 
CHECK-G-MESA3D: .amd_kernel_code_t @@ -519,7 +503,7 @@ define amdgpu_kernel void @test_cluster_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll index aa3b7b3..3ef84a3 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.id.ll @@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_id_x(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out) ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -330,7 +330,7 @@ define amdgpu_kernel void @test_workgroup_id_x_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: 
workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out) ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_id_y_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -956,7 +956,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1044,7 +1044,7 @@ define amdgpu_kernel void @test_workgroup_flat_id(ptr addrspace(1) %out) { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1135,7 +1135,7 @@ define amdgpu_kernel void @test_workgroup_id_z_optimized(ptr addrspace(1) %out) ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1219,7 +1219,7 @@ define amdgpu_kernel void 
@test_workgroup_id_z_optimized(ptr addrspace(1) %out) ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll index afe37e3..b8ff9e5 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.flat.id.ll @@ -65,7 +65,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1 ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -153,7 +153,7 @@ define amdgpu_kernel void @test_workgroup_max_flat_id(ptr addrspace(1) %out) #1 ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll index 7ea4fa5..9bca696 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cluster.workgroup.max.id.ll @@ -67,7 +67,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -155,7 +155,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -246,7 +246,7 @@ define amdgpu_kernel void @test_workgroup_max_id_x_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -330,7 +330,7 @@ define amdgpu_kernel void 
@test_workgroup_max_id_x_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -421,7 +421,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -509,7 +509,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -600,7 +600,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -684,7 +684,7 @@ define amdgpu_kernel void @test_workgroup_max_id_y_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -775,7 +775,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 { ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -863,7 +863,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z(ptr addrspace(1) %out) #1 { ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -954,7 +954,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o ; CHECK-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-MESA3D-NEXT: is_debug_enabled 
= 0 -; CHECK-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-MESA3D-NEXT: gds_segment_byte_size = 0 @@ -1038,7 +1038,7 @@ define amdgpu_kernel void @test_workgroup_max_id_z_optimized(ptr addrspace(1) %o ; CHECK-G-MESA3D-NEXT: is_ptr64 = 1 ; CHECK-G-MESA3D-NEXT: is_dynamic_callstack = 0 ; CHECK-G-MESA3D-NEXT: is_debug_enabled = 0 -; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 0 +; CHECK-G-MESA3D-NEXT: is_xnack_enabled = 1 ; CHECK-G-MESA3D-NEXT: workitem_private_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: workgroup_group_segment_byte_size = 0 ; CHECK-G-MESA3D-NEXT: gds_segment_byte_size = 0 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll index 56215ca..67d0410 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll @@ -59,21 +59,20 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) { ; GFX9-NEXT: global_store_dword v[0:1], v0, off ; GFX9-NEXT: s_endpgm ; -; GFX1250-SDAG-LABEL: is_private_vgpr: -; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v1 -; GFX1250-SDAG-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 -; GFX1250-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo -; GFX1250-SDAG-NEXT: global_store_b32 v[0:1], v0, off -; GFX1250-SDAG-NEXT: s_endpgm +; GFX1250-LABEL: is_private_vgpr: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 +; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo +; GFX1250-NEXT: global_store_b32 v[0:1], v0, off +; GFX1250-NEXT: s_endpgm ; ; CI-GISEL-LABEL: is_private_vgpr: ; CI-GISEL: ; %bb.0: @@ -122,22 +121,6 @@ define amdgpu_kernel void @is_private_vgpr(ptr addrspace(1) %ptr.ptr) { ; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo ; GFX11-NEXT: global_store_b32 v[0:1], v0, off ; GFX11-NEXT: s_endpgm -; -; GFX1250-GISEL-LABEL: is_private_vgpr: -; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v0, s[0:1] scale_offset scope:SCOPE_SYS -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v1, v2 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x4000000, v0 -; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo -; GFX1250-GISEL-NEXT: 
global_store_b32 v[0:1], v0, off -; GFX1250-GISEL-NEXT: s_endpgm %id = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds ptr, ptr addrspace(1) %ptr.ptr, i32 %id %ptr = load volatile ptr, ptr addrspace(1) %gep @@ -206,9 +189,8 @@ define amdgpu_kernel void @is_private_sgpr(ptr %ptr) { ; GFX1250-SDAG-LABEL: is_private_sgpr: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x4 -; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, s1 +; GFX1250-SDAG-NEXT: s_xor_b32 s0, s0, src_flat_scratch_base_hi ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; GFX1250-SDAG-NEXT: s_cmp_lt_u32 s0, 0x4000000 ; GFX1250-SDAG-NEXT: s_cselect_b32 s0, -1, 0 @@ -285,9 +267,8 @@ define amdgpu_kernel void @is_private_sgpr(ptr %ptr) { ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, s0 +; GFX1250-GISEL-NEXT: s_xor_b32 s0, s1, src_flat_scratch_base_hi +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_cmp_ge_u32 s0, 0x4000000 ; GFX1250-GISEL-NEXT: s_cbranch_scc1 .LBB1_2 ; GFX1250-GISEL-NEXT: ; %bb.1: ; %bb0 @@ -311,5 +292,4 @@ bb1: ; CI: {{.*}} ; GFX10-GISEL: {{.*}} ; GFX11-GISEL: {{.*}} -; GFX1250: {{.*}} ; SI-SDAG: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll index 4f7bbf8..42a50bb 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane.gfx1250.ll @@ -5,13 +5,13 @@ define amdgpu_kernel void @v_permlane_bcast_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_bcast_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_bcast_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.bcast(i32 %src0, i32 %src1, i32 %src2) @@ -92,13 +92,13 @@ define amdgpu_kernel void @v_permlane_bcast_b32_vvv(ptr addrspace(1) %out, i32 % define amdgpu_kernel void @v_permlane_down_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_down_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_down_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.down(i32 %src0, i32 %src1, i32 %src2) @@ -179,13 +179,13 @@ define 
amdgpu_kernel void @v_permlane_down_b32_vvv(ptr addrspace(1) %out, i32 %s define amdgpu_kernel void @v_permlane_up_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_up_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_up_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.up(i32 %src0, i32 %src1, i32 %src2) @@ -266,13 +266,13 @@ define amdgpu_kernel void @v_permlane_up_b32_vvv(ptr addrspace(1) %out, i32 %src define amdgpu_kernel void @v_permlane_xor_b32_vss(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) { ; GFX1250-LABEL: v_permlane_xor_b32_vss: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX1250-NEXT: s_load_b32 s6, s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s4 +; GFX1250-NEXT: v_permlane_xor_b32 v0, v0, s3, s6 ; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-NEXT: s_endpgm %v = call i32 @llvm.amdgcn.permlane.xor(i32 %src0, i32 %src1, i32 %src2) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll index de7d234..b9bf76c 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s declare i32 @llvm.amdgcn.s.quadmask.i32(i32) declare i64 @llvm.amdgcn.s.quadmask.i64(i64) @@ -172,3 +172,91 @@ entry: %qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask) ret i64 %qm } + +;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b32 implicitly defines SCC. 
+define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) { +; GFX11-GISEL-LABEL: test_scc_quadmask_32: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1 +; GFX11-GISEL-NEXT: s_quadmask_b32 s1, s1 +; GFX11-GISEL-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0 +; GFX11-GISEL-NEXT: global_store_b32 v2, v3, s[2:3] +; GFX11-GISEL-NEXT: global_store_b32 v[0:1], v4, off +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX11-SDAG-LABEL: test_scc_quadmask_32: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0 +; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-NEXT: s_and_b32 s0, s0, 1 +; GFX11-SDAG-NEXT: s_quadmask_b32 s1, s1 +; GFX11-SDAG-NEXT: s_cmp_eq_u32 s0, 0 +; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0 +; GFX11-SDAG-NEXT: global_store_b32 v2, v3, s[2:3] +; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v4, off +; GFX11-SDAG-NEXT: s_endpgm + %and = and i32 %val0, 1 + %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone + store i32 %result, ptr addrspace(1) %ptr + %cmp = icmp eq i32 %and, 0 + %sel = select i1 %cmp, i32 1, i32 0 + store i32 %sel, ptr addrspace(1) null, align 4 + ret void +} + +;; Ensure that AND/ICMP cannot be fused into an AND because s_quadmask_b64 implicitly defines SCC. 
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) { +; GFX11-GISEL-LABEL: test_scc_quadmask_64: +; GFX11-GISEL: ; %bb.0: +; GFX11-GISEL-NEXT: s_clause 0x1 +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-GISEL-NEXT: s_load_b32 s4, s[4:5], 0x24 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: s_quadmask_b64 s[0:1], s[0:1] +; GFX11-GISEL-NEXT: s_and_b32 s4, s4, 1 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX11-GISEL-NEXT: s_cmp_eq_u32 s4, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1 +; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0 +; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0 +; GFX11-GISEL-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-GISEL-NEXT: global_store_b64 v4, v[0:1], s[2:3] +; GFX11-GISEL-NEXT: global_store_b32 v[2:3], v5, off +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX11-SDAG-LABEL: test_scc_quadmask_64: +; GFX11-SDAG: ; %bb.0: +; GFX11-SDAG-NEXT: s_clause 0x1 +; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x24 +; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c +; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 0 +; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-NEXT: s_and_b32 s4, s6, 1 +; GFX11-SDAG-NEXT: s_quadmask_b64 s[0:1], s[0:1] +; GFX11-SDAG-NEXT: s_cmp_eq_u32 s4, 0 +; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s0 +; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0 +; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0 +; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0 +; GFX11-SDAG-NEXT: global_store_b64 v4, v[2:3], s[2:3] +; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v5, off +; GFX11-SDAG-NEXT: s_endpgm + %and = and i32 %val0, 1 + %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone + store i64 %result, ptr addrspace(1) %ptr + %cmp = icmp eq i32 %and, 0 + %sel = select i1 %cmp, i32 1, i32 0 + store i32 %sel, ptr addrspace(1) null, align 4 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir index 76e2092..abcae69 100644 --- a/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir +++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scale-offset.mir @@ -69,9 +69,9 @@ body: | bb.0: ; GCN-LABEL: name: merge_s_load_x1_x1_imm_no_scale_offset ; GCN: [[DEF:%[0-9]+]]:sgpr_64 = IMPLICIT_DEF - ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4) - ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY [[S_LOAD_DWORDX2_IMM]].sub0 - ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed [[S_LOAD_DWORDX2_IMM]].sub1 + ; GCN-NEXT: early-clobber %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec [[DEF]], 0, 0 :: (dereferenceable invariant load (s64), align 4) + ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32_xm0_xexec = COPY %3.sub0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32_xm0_xexec = COPY killed %3.sub1 %0:sgpr_64 = IMPLICIT_DEF %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 0, 0 :: (dereferenceable invariant load (s32)) %2:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0:sgpr_64, 4, 0 :: (dereferenceable invariant load (s32)) diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll index 1e6b77e..4ad161c 100644 --- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll +++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll @@ -471,13 +471,13 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r ; GFX1250-NEXT: 
s_cmp_eq_u32 s0, 0 ; GFX1250-NEXT: s_cbranch_scc1 .LBB4_3 ; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader -; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1] -; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3] ; GFX1250-NEXT: .LBB4_2: ; %for.body @@ -602,13 +602,13 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d, ; GFX1250-NEXT: s_cmp_eq_u32 s0, 0 ; GFX1250-NEXT: s_cbranch_scc1 .LBB5_3 ; GFX1250-NEXT: ; %bb.1: ; %for.body.preheader -; GFX1250-NEXT: s_load_b128 s[4:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b128 s[8:11], s[4:5], 0x24 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 4, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1] -; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[4:5], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[10:11], v[0:1] +; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], s[8:9], v[0:1] ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) ; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], 0xb0, v[2:3] ; GFX1250-NEXT: .LBB5_2: ; %for.body diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll index 0de7f8f..bd29e9e 100644 --- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll +++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals -; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s +; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s ; Regression test for issue 160181 ; One variable is chosen to be assigned at zero. Here, that's @both @@ -22,12 +22,20 @@ ;. ; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4, !absolute_symbol [[META0:![0-9]+]] ; CHECK: @llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @llvm.amdgcn.module.lds to ptr)], section "llvm.metadata" +; CHECK: @llvm.amdgcn.kernel.kern_one.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_one.lds.t poison, align 4, !absolute_symbol [[META1:![0-9]+]] +; CHECK: @llvm.amdgcn.kernel.kern_two.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_two.lds.t poison, align 4, !absolute_symbol [[META1]] +; CHECK: @llvm.amdgcn.kernel.kern_block_direct_allocation.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_block_direct_allocation.lds.t poison, align 4, !absolute_symbol [[META1]] + ;. 
define void @func_one() { ; CHECK-LABEL: define {{[^@]+}}@func_one() { -; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1:![0-9]+]] -; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18:![0-9]+]] -; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2:![0-9]+]] +; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4 +; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[ONE1]], align 4 +; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11:![0-9]+]] ; CHECK-NEXT: ret void ; %val0 = load i32, ptr addrspace(3) @both @@ -38,9 +46,10 @@ define void @func_one() { define amdgpu_kernel void @kern_one() { ; CHECK-LABEL: define {{[^@]+}}@kern_one -; CHECK-SAME: () #[[ATTR0:[0-9]+]] { +; CHECK-SAME: () #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META16:![0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META24:![0-9]+]] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_one.lds) ] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META17:![0-9]+]] ; CHECK-NEXT: call void @func_one() ; CHECK-NEXT: ret void ; @@ -51,9 +60,13 @@ entry: define void @func_two() { ; CHECK-LABEL: define {{[^@]+}}@func_two() { -; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]] -; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25:![0-9]+]] -; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]] +; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4 +; CHECK-NEXT: [[TWO1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[TWO1]], align 4 +; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]] ; CHECK-NEXT: ret void ; %val0 = load i32, ptr addrspace(3) @both @@ -64,9 +77,10 @@ define void @func_two() { define 
amdgpu_kernel void @kern_two() { ; CHECK-LABEL: define {{[^@]+}}@kern_two -; CHECK-SAME: () #[[ATTR0]] { +; CHECK-SAME: () #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META18:![0-9]+]] { ; CHECK-NEXT: entry: -; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META26:![0-9]+]], !noalias [[META27:![0-9]+]] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_two.lds) ] +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]] ; CHECK-NEXT: call void @func_two() ; CHECK-NEXT: ret void ; @@ -82,11 +96,18 @@ entry: ; remains the best candidate for address zero allocation. define void @func_block_direct_allocation() { ; CHECK-LABEL: define {{[^@]+}}@func_block_direct_allocation() { -; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18]] -; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id() +; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4 +; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3) +; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) [[ONE1]], align 4 +; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4 +; CHECK-NEXT: [[TWO2:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3) +; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) [[TWO2]], align 4 ; CHECK-NEXT: [[SUM:%.*]] = add i32 [[VAL1]], [[VAL2]] -; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]] -; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]] +; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]] +; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]] ; CHECK-NEXT: ret void ; %val1 = load i32, ptr addrspace(3) @one @@ -99,7 +120,8 @@ define void @func_block_direct_allocation() { define amdgpu_kernel void @kern_block_direct_allocation() { ; CHECK-LABEL: define {{[^@]+}}@kern_block_direct_allocation -; CHECK-SAME: () #[[ATTR0]] { +; CHECK-SAME: () #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META21:![0-9]+]] { +; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_block_direct_allocation.lds) ], !alias.scope [[META22:![0-9]+]], !noalias [[META25:![0-9]+]] ; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ] ; CHECK-NEXT: call void @func_block_direct_allocation() ; CHECK-NEXT: call void @func_one() @@ -112,35 +134,8 @@ define amdgpu_kernel void 
@kern_block_direct_allocation() { ret void } ;. -; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="16" } -; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) } -;. -; CHECK: [[META0]] = !{i32 0, i32 1} -; CHECK: [[META1]] = !{[[META2:![0-9]+]], [[META4:![0-9]+]], [[META5:![0-9]+]], [[META6:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]], [[META10:![0-9]+]], [[META12:![0-9]+]], [[META13:![0-9]+]], [[META14:![0-9]+]], [[META16:![0-9]+]], [[META17:![0-9]+]]} -; CHECK: [[META2]] = distinct !{[[META2]], [[META3:![0-9]+]]} -; CHECK: [[META3]] = distinct !{[[META3]]} -; CHECK: [[META4]] = distinct !{[[META4]], [[META3]]} -; CHECK: [[META5]] = distinct !{[[META5]], [[META3]]} -; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]} -; CHECK: [[META7]] = distinct !{[[META7]]} -; CHECK: [[META8]] = distinct !{[[META8]], [[META7]]} -; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]} -; CHECK: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]} -; CHECK: [[META11]] = distinct !{[[META11]]} -; CHECK: [[META12]] = distinct !{[[META12]], [[META11]]} -; CHECK: [[META13]] = distinct !{[[META13]], [[META11]]} -; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]} -; CHECK: [[META15]] = distinct !{[[META15]]} -; CHECK: [[META16]] = distinct !{[[META16]], [[META15]]} -; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]} -; CHECK: [[META18]] = !{[[META19:![0-9]+]], [[META2]], [[META5]], [[META20:![0-9]+]], [[META6]], [[META9]], [[META21:![0-9]+]], [[META10]], [[META13]], [[META22:![0-9]+]], [[META14]], [[META17]]} -; CHECK: [[META19]] = distinct !{[[META19]], [[META3]]} -; CHECK: [[META20]] = distinct !{[[META20]], [[META7]]} -; CHECK: [[META21]] = distinct !{[[META21]], [[META11]]} -; CHECK: [[META22]] = distinct !{[[META22]], [[META15]]} -; CHECK: [[META23]] = !{[[META19]], [[META4]], [[META5]], [[META20]], [[META8]], [[META9]], [[META21]], [[META12]], [[META13]], [[META22]], [[META16]], [[META17]]} -; CHECK: [[META24]] = !{[[META10]], [[META12]], [[META13]], [[META14]], [[META16]], [[META17]]} -; CHECK: [[META25]] = !{[[META19]], [[META2]], [[META4]], [[META20]], [[META6]], [[META8]], [[META21]], [[META10]], [[META12]], [[META22]], [[META14]], [[META16]]} -; CHECK: [[META26]] = !{[[META22]]} -; CHECK: [[META27]] = !{[[META14]], [[META16]], [[META17]]} +; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="12" } +; CHECK: attributes #[[ATTR1]] = { "amdgpu-lds-size"="16" } +; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) } +; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) } ;. 
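For reference, the updated checks above all encode the same indirection for an LDS variable reached from a non-kernel function: the function queries the calling kernel's id, indexes a constant per-kernel offset table, and converts the loaded offset back into an LDS pointer. Below is a minimal standalone sketch of that pattern; the table name and its [3 x [2 x i32]] shape (three kernels, two table-allocated variables) mirror the CHECK lines, while @use_one, the row/column choice, and the stored value are hypothetical illustration only.

; Sketch only, not part of the diff: the lookup pattern the CHECK lines encode.
@llvm.amdgcn.lds.offset.table = internal addrspace(4) constant [3 x [2 x i32]] poison

define void @use_one() {
  ; Which kernel reached this function determines the table row.
  %id = call i32 @llvm.amdgcn.lds.kernel.id()
  ; Row = kernel id; column 0 = offset of the first table-allocated variable
  ; within that kernel's LDS block.
  %slot = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 %id, i32 0
  %off = load i32, ptr addrspace(4) %slot
  ; LDS pointers are 32-bit, so the stored offset converts directly.
  %p = inttoptr i32 %off to ptr addrspace(3)
  store i32 42, ptr addrspace(3) %p
  ret void
}

declare i32 @llvm.amdgcn.lds.kernel.id()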
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll index dbcd370..08ec0c8 100644 --- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll +++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll @@ -1117,18 +1117,19 @@ define amdgpu_kernel void @mad_i64_i32_uniform(ptr addrspace(1) %out, i32 %arg0, ; ; GFX1250-LABEL: mad_i64_i32_uniform: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s7, 0 +; GFX1250-NEXT: s_mov_b32 s5, 0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mov_b32 s6, s2 +; GFX1250-NEXT: s_mov_b32 s4, s2 ; GFX1250-NEXT: s_mov_b32 s2, s3 -; GFX1250-NEXT: s_mov_b32 s3, s7 +; GFX1250-NEXT: s_mov_b32 s3, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: s_mul_u64 s[2:3], s[6:7], s[2:3] -; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5] +; GFX1250-NEXT: s_mul_u64 s[2:3], s[4:5], s[2:3] +; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[6:7] ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] diff --git a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir index 61f2629..c19d5a6 100644 --- a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir +++ b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir @@ -33,7 +33,7 @@ name: asm_write_vgpr_accvgpr_write_read body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... @@ -47,7 +47,7 @@ name: asm_write_vgpr_accvgpr_write_read_partialnop body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr0 S_NOP 0 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... @@ -60,7 +60,7 @@ name: asm_write_vgpr_accvgpr_write_read_otherreg body: | bb.0: liveins: $vgpr0 - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2228234 /* regdef:VGPR_32 */, def $vgpr1 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2031626 /* regdef:VGPR_32 */, def $vgpr1 $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec ... 
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll index fef9a9a..ae08054 100644 --- a/llvm/test/CodeGen/AMDGPU/max.ll +++ b/llvm/test/CodeGen/AMDGPU/max.ll @@ -257,16 +257,15 @@ define amdgpu_kernel void @v_test_imax_sge_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_imax_sge_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_i8 s2, s[2:3], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_i8 s3, s[4:5], 0x0 +; GFX1250-NEXT: s_load_i8 s4, s[2:3], 0x0 +; GFX1250-NEXT: s_load_i8 s5, s[6:7], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_max_i32 s2, s2, s3 +; GFX1250-NEXT: s_max_i32 s2, s4, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b32_e32 v1, s2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -701,16 +700,15 @@ define amdgpu_kernel void @v_test_umax_uge_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_umax_uge_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_u8 s2, s[2:3], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_u8 s3, s[4:5], 0x0 +; GFX1250-NEXT: s_load_u8 s4, s[2:3], 0x0 +; GFX1250-NEXT: s_load_u8 s5, s[6:7], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_max_u32 s2, s2, s3 +; GFX1250-NEXT: s_max_u32 s2, s4, s5 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX1250-NEXT: v_mov_b32_e32 v1, s2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -777,13 +775,12 @@ define amdgpu_kernel void @v_test_umax_ugt_i32(ptr addrspace(1) %out, ptr addrsp ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b32 s6, s[0:1], 0x0 +; GFX1250-NEXT: s_load_b64 s[2:3], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u32_e32 v0, s2, v0 -; GFX1250-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1250-NEXT: v_max_u32_e32 v0, s6, v0 +; GFX1250-NEXT: global_store_b32 v1, v0, s[2:3] ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_test_umax_ugt_i32: @@ -1122,12 +1119,12 @@ define amdgpu_kernel void @test_umax_ugt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umax_ugt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1175,12 +1172,12 @@ define amdgpu_kernel void @test_umax_uge_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umax_uge_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: 
s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1228,12 +1225,12 @@ define amdgpu_kernel void @test_imax_sgt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imax_sgt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; @@ -1281,12 +1278,12 @@ define amdgpu_kernel void @test_imax_sge_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imax_sge_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_max_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll index 311527d..6a3d31f 100644 --- a/llvm/test/CodeGen/AMDGPU/min.ll +++ b/llvm/test/CodeGen/AMDGPU/min.ll @@ -131,14 +131,14 @@ define amdgpu_kernel void @v_test_imin_sle_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_sle_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -1172,14 +1172,14 @@ define amdgpu_kernel void @s_test_imin_sle_v4i16(ptr addrspace(1) %out, <4 x i16 ; ; GFX1250-LABEL: s_test_imin_sle_v4i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: v_pk_min_i16 v1, s1, s3 ; GFX1250-NEXT: v_pk_min_i16 v0, s0, s2 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-NEXT: s_endpgm %cmp = icmp sle <4 x i16> %a, %b %val = select <4 x i1> %cmp, <4 x i16> %a, <4 x i16> %b @@ -1307,14 +1307,14 @@ define amdgpu_kernel void @v_test_imin_slt_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_slt_i32: ; GFX1250: ; %bb.0: +; 
GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -1484,14 +1484,14 @@ define amdgpu_kernel void @v_test_imin_slt_i16(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_imin_slt_i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_u16 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_u16 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_i16 v1, v1, v2 ; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] scale_offset @@ -1686,16 +1686,16 @@ define amdgpu_kernel void @s_test_imin_slt_v2i32(ptr addrspace(1) %out, <2 x i32 ; ; GFX1250-LABEL: s_test_imin_slt_v2i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x8 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_min_i32 s0, s0, s2 ; GFX1250-NEXT: s_min_i32 s1, s1, s3 ; GFX1250-NEXT: v_mov_b32_e32 v0, s0 ; GFX1250-NEXT: v_mov_b32_e32 v1, s1 -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[4:5] +; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-NEXT: s_endpgm %cmp = icmp slt <2 x i32> %a, %b %val = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b @@ -2011,14 +2011,14 @@ define amdgpu_kernel void @v_test_umin_ule_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_umin_ule_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -2171,16 +2171,16 @@ define amdgpu_kernel void @v_test_umin_ule_v3i32(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_umin_ule_v3i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 4, v0 ; 
GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b96 v[0:2], v3, s[2:3] -; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[4:5] +; GFX1250-NEXT: global_load_b96 v[4:6], v3, s[6:7] ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v2, v2, v6 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v5 @@ -2374,14 +2374,14 @@ define amdgpu_kernel void @v_test_umin_ule_v3i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_umin_ule_v3i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b64 v[2:3], v4, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_xcnt 0x0 ; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 3, v4 ; GFX1250-NEXT: s_wait_loadcnt 0x0 @@ -2611,14 +2611,14 @@ define amdgpu_kernel void @v_test_umin_ult_i32(ptr addrspace(1) %out, ptr addrsp ; ; GFX1250-LABEL: v_test_umin_ult_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u32_e32 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -2771,14 +2771,14 @@ define amdgpu_kernel void @v_test_umin_ult_i8(ptr addrspace(1) %out, ptr addrspa ; ; GFX1250-LABEL: v_test_umin_ult_i8: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_u8 v1, v0, s[2:3] -; GFX1250-NEXT: global_load_u8 v2, v0, s[4:5] +; GFX1250-NEXT: global_load_u8 v2, v0, s[6:7] ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_min_u16 v1, v1, v2 ; GFX1250-NEXT: global_store_b8 v0, v1, s[0:1] @@ -3023,23 +3023,22 @@ define amdgpu_kernel void @v_test_umin_ult_i32_multi_use(ptr addrspace(1) %out0, ; ; GFX1250-LABEL: v_test_umin_ult_i32_multi_use: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_load_b32 s4, s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b32 s5, s[6:7], 0x0 +; GFX1250-NEXT: s_load_b32 s0, s[12:13], 0x0 +; GFX1250-NEXT: s_load_b32 s1, s[14:15], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_cmp_lt_u32 s4, s5 -; GFX1250-NEXT: s_cselect_b32 s6, -1, 0 +; GFX1250-NEXT: s_cmp_lt_u32 s0, s1 +; GFX1250-NEXT: s_cselect_b32 s2, -1, 0 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s6 -; 
GFX1250-NEXT: s_and_b32 s6, s6, exec_lo -; GFX1250-NEXT: s_cselect_b32 s4, s4, s5 -; GFX1250-NEXT: v_mov_b32_e32 v2, s4 +; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2 +; GFX1250-NEXT: s_and_b32 s2, s2, exec_lo +; GFX1250-NEXT: s_cselect_b32 s0, s0, s1 +; GFX1250-NEXT: v_mov_b32_e32 v2, s0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b32 v1, v2, s[0:1] -; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3] +; GFX1250-NEXT: global_store_b32 v1, v2, s[8:9] +; GFX1250-NEXT: global_store_b8 v1, v0, s[10:11] ; GFX1250-NEXT: s_endpgm %a = load i32, ptr addrspace(1) %aptr, align 4 %b = load i32, ptr addrspace(1) %bptr, align 4 @@ -3220,12 +3219,12 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0, ; ; GFX1250-LABEL: v_test_umin_ult_i16_multi_use: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x0 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x0 ; GFX1250-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_load_u16 v1, v0, s[6:7] -; GFX1250-NEXT: global_load_u16 v2, v0, s[4:5] +; GFX1250-NEXT: global_load_u16 v1, v0, s[14:15] +; GFX1250-NEXT: global_load_u16 v2, v0, s[12:13] ; GFX1250-NEXT: s_wait_loadcnt 0x1 ; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff, v1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 @@ -3235,8 +3234,8 @@ define amdgpu_kernel void @v_test_umin_ult_i16_multi_use(ptr addrspace(1) %out0, ; GFX1250-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo ; GFX1250-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo ; GFX1250-NEXT: s_clause 0x1 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: global_store_b8 v0, v2, s[2:3] +; GFX1250-NEXT: global_store_b16 v0, v1, s[8:9] +; GFX1250-NEXT: global_store_b8 v0, v2, s[10:11] ; GFX1250-NEXT: s_endpgm %a = load i16, ptr addrspace(1) %aptr, align 2 %b = load i16, ptr addrspace(1) %bptr, align 2 @@ -4338,12 +4337,12 @@ define amdgpu_kernel void @test_umin_ult_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umin_ult_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp ult i64 %a, %b @@ -4462,12 +4461,12 @@ define amdgpu_kernel void @test_umin_ule_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_umin_ule_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_u64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp ule i64 %a, %b @@ -4586,12 +4585,12 @@ define amdgpu_kernel void @test_imin_slt_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imin_slt_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: 
v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp slt i64 %a, %b @@ -4710,12 +4709,12 @@ define amdgpu_kernel void @test_imin_sle_i64(ptr addrspace(1) %out, i64 %a, i64 ; ; GFX1250-LABEL: test_imin_sle_i64: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[4:5] +; GFX1250-NEXT: v_min_i64 v[0:1], s[2:3], s[6:7] ; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-NEXT: s_endpgm %tmp = icmp sle i64 %a, %b @@ -4872,14 +4871,14 @@ define amdgpu_kernel void @v_test_imin_sle_v2i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_imin_sle_v2i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_pk_min_i16 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset @@ -5042,14 +5041,14 @@ define amdgpu_kernel void @v_test_imin_ule_v2i16(ptr addrspace(1) %out, ptr addr ; ; GFX1250-LABEL: v_test_imin_ule_v2i16: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: global_load_b32 v1, v0, s[2:3] scale_offset -; GFX1250-NEXT: global_load_b32 v2, v0, s[4:5] scale_offset +; GFX1250-NEXT: global_load_b32 v2, v0, s[6:7] scale_offset ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_pk_min_u16 v1, v1, v2 ; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] scale_offset diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll index baccb4c..d29847e 100644 --- a/llvm/test/CodeGen/AMDGPU/mul.ll +++ b/llvm/test/CodeGen/AMDGPU/mul.ll @@ -450,6 +450,7 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(ptr addrspace(1) %out, i64 %a, ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_load_b32 s3, s[4:5], 0x34 +; GFX1250-NEXT: ; kill: killed $sgpr4_sgpr5 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mul_i32 s2, s3, s2 ; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 @@ -613,25 +614,25 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(ptr addrspace(1) %out, ptr add ; ; GFX1250-LABEL: v_trunc_i64_mul_to_i32: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 
-; GFX1250-NEXT: s_mov_b32 s6, s10 -; GFX1250-NEXT: s_mov_b32 s7, s11 +; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 +; GFX1250-NEXT: s_mov_b32 s6, -1 +; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s14, s6 +; GFX1250-NEXT: s_mov_b32 s15, s7 +; GFX1250-NEXT: s_mov_b32 s10, s6 +; GFX1250-NEXT: s_mov_b32 s11, s7 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mov_b32 s12, s2 ; GFX1250-NEXT: s_mov_b32 s13, s3 ; GFX1250-NEXT: buffer_load_b32 v0, off, s[12:15], null -; GFX1250-NEXT: buffer_load_b32 v1, off, s[4:7], null -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: buffer_load_b32 v1, off, s[8:11], null +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_mul_lo_u32 v0, v1, v0 -; GFX1250-NEXT: buffer_store_b32 v0, off, s[8:11], null +; GFX1250-NEXT: buffer_store_b32 v0, off, s[4:7], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_trunc_i64_mul_to_i32: @@ -2091,11 +2092,11 @@ define amdgpu_kernel void @s_mul_i64(ptr addrspace(1) %out, i64 %a, i64 %b) noun ; ; GFX1250-LABEL: s_mul_i64: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[4:5] +; GFX1250-NEXT: s_mul_u64 s[4:5], s[2:3], s[6:7] ; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 ; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] ; GFX1250-NEXT: s_mov_b32 s2, -1 @@ -2292,25 +2293,25 @@ define amdgpu_kernel void @v_mul_i64(ptr addrspace(1) %out, ptr addrspace(1) %ap ; ; GFX1250-LABEL: v_mul_i64: ; GFX1250: ; %bb.0: ; %entry +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-NEXT: s_mov_b32 s10, -1 -; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s14, s10 -; GFX1250-NEXT: s_mov_b32 s15, s11 -; GFX1250-NEXT: s_mov_b32 s6, s10 -; GFX1250-NEXT: s_mov_b32 s7, s11 +; GFX1250-NEXT: s_load_b64 s[8:9], s[4:5], 0x34 +; GFX1250-NEXT: s_mov_b32 s6, -1 +; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s14, s6 +; GFX1250-NEXT: s_mov_b32 s15, s7 +; GFX1250-NEXT: s_mov_b32 s10, s6 +; GFX1250-NEXT: s_mov_b32 s11, s7 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_mov_b32 s12, s2 ; GFX1250-NEXT: s_mov_b32 s13, s3 ; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[12:15], null -; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[4:7], null -; GFX1250-NEXT: s_mov_b32 s8, s0 -; GFX1250-NEXT: s_mov_b32 s9, s1 +; GFX1250-NEXT: buffer_load_b64 v[2:3], off, s[8:11], null +; GFX1250-NEXT: s_mov_b32 s4, s0 +; GFX1250-NEXT: s_mov_b32 s5, s1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 ; GFX1250-NEXT: v_mul_u64_e32 v[0:1], v[0:1], v[2:3] -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: v_mul_i64: @@ -2845,30 +2846,30 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace( ; ; GFX1250-LABEL: mul64_in_branch: ; GFX1250: ; %bb.0: ; %entry -; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1250-NEXT: s_cmp_lg_u64 s[12:13], 0 ; GFX1250-NEXT: s_cbranch_scc0 .LBB16_3 ; GFX1250-NEXT: ; %bb.1: ; 
%else -; GFX1250-NEXT: s_mul_u64 s[4:5], s[4:5], s[6:7] +; GFX1250-NEXT: s_mul_u64 s[0:1], s[12:13], s[14:15] ; GFX1250-NEXT: s_cbranch_execnz .LBB16_4 ; GFX1250-NEXT: .LBB16_2: ; %if -; GFX1250-NEXT: s_mov_b32 s7, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s6, -1 -; GFX1250-NEXT: s_mov_b32 s4, s2 -; GFX1250-NEXT: s_mov_b32 s5, s3 -; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null +; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s0, s10 +; GFX1250-NEXT: s_mov_b32 s1, s11 +; GFX1250-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null ; GFX1250-NEXT: s_branch .LBB16_5 ; GFX1250-NEXT: .LBB16_3: -; GFX1250-NEXT: ; implicit-def: $sgpr4_sgpr5 +; GFX1250-NEXT: ; implicit-def: $sgpr0_sgpr1 ; GFX1250-NEXT: s_branch .LBB16_2 ; GFX1250-NEXT: .LBB16_4: -; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5] +; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1] ; GFX1250-NEXT: .LBB16_5: ; %endif -; GFX1250-NEXT: s_mov_b32 s3, 0x31016000 -; GFX1250-NEXT: s_mov_b32 s2, -1 +; GFX1250-NEXT: s_mov_b32 s11, 0x31016000 +; GFX1250-NEXT: s_mov_b32 s10, -1 ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null +; GFX1250-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null ; GFX1250-NEXT: s_endpgm ; ; EG-LABEL: mul64_in_branch: diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index b0651ef..78207c2 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -340,46 +340,46 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fadd_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21 -; 
GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[34:35] @@ -395,58 +395,58 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], 
s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43] @@ -466,14 +466,14 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -1597,46 +1597,46 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fmul_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, 
s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s20 :: v_dual_mov_b32 v33, s21 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s22 :: v_dual_mov_b32 v35, s23 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s18 :: v_dual_mov_b32 v39, s29 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s30 :: v_dual_mov_b32 v41, s31 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s24 :: v_dual_mov_b32 v37, s19 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s28 :: v_dual_mov_b32 v55, s15 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s3 :: v_dual_mov_b32 v52, s12 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s13 :: v_dual_mov_b32 v54, s14 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s7 :: v_dual_mov_b32 v50, s2 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s27 :: v_dual_mov_b32 v46, s4 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s5 :: v_dual_mov_b32 v48, s6 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s25 :: v_dual_mov_b32 v44, s26 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s40 :: v_dual_mov_b32 v33, s41 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s42 :: v_dual_mov_b32 v35, s43 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s38 :: v_dual_mov_b32 v39, s49 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v40, s50 :: v_dual_mov_b32 v41, s51 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s44 :: v_dual_mov_b32 v37, s39 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s48 :: v_dual_mov_b32 v55, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s11 :: v_dual_mov_b32 v52, s20 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s21 :: v_dual_mov_b32 v54, s22 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s15 :: v_dual_mov_b32 v50, s10 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s47 :: v_dual_mov_b32 v46, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s13 :: v_dual_mov_b32 v48, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v43, s45 :: v_dual_mov_b32 v44, s46 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s8 :: v_dual_mov_b32 v33, s9 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s10 :: v_dual_mov_b32 v35, s11 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s16 :: v_dual_mov_b32 v33, s17 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: 
v_pk_mul_f32 v[4:5], v[4:5], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[34:35] @@ -1652,58 +1652,58 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; 
GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43] @@ -1723,14 +1723,14 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] 
offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -2428,46 +2428,46 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fma_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[34:35] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[34:35] -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v56, s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v56, s[0:1] +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-SDAG-NEXT: s_clause 0x1 +; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[20:21] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[22:23] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[30:31] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[28:29] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[12:13] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[14:15] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[4:5] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[6:7] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[24:25] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[26:27] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[18:19] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[40:41] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[50:51] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[48:49] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[20:21] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[22:23] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[12:13] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[44:45] +; 
GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[46:47] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[40:41], v[40:41] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[0:1] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[8:9] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[38:39], v[38:39] -; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[16:17] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[36:37] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[52:53], v[52:53] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[32:33], v[32:33] @@ -2482,58 +2482,58 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[36:37], v[36:37] ; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[38:39], v[38:39] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[34:35] -; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[12:15], s[0:1] +; GFX1250-SDAG-NEXT: global_store_b128 v56, v[0:3], s[0:1] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v32_vs: ; GFX1250-GISEL: ; %bb.0: -; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 -; GFX1250-GISEL-NEXT: s_load_b512 
s[16:31], s[4:5], 0xa4 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[0:1] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[0:1] offset:112 +; GFX1250-GISEL-NEXT: s_clause 0x1 +; GFX1250-GISEL-NEXT: s_load_b512 s[36:51], s[4:5], 0xa4 +; GFX1250-GISEL-NEXT: s_load_b512 s[8:23], s[4:5], 0xe4 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[36:37] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[40:41] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[42:43] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[44:45] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[46:47] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[48:49] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[50:51] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] -; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43] @@ -2553,14 +2553,14 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] ; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] -; 
GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[0:1] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[0:1] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[0:1] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[0:1] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[0:1] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[0:1] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[0:1] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[0:1] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -3529,9 +3529,9 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; ; GFX1250-SDAG-LABEL: fadd_fadd_fsub: ; GFX1250-SDAG: ; %bb.0: ; %bb +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3] ; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3 @@ -3541,14 +3541,14 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2 ; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5] +; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[6:7] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub: ; GFX1250-GISEL: ; %bb.0: ; %bb +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x34 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] ; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] @@ -3560,7 +3560,7 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1] ; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[6:7] ; GFX1250-GISEL-NEXT: s_endpgm bb: %i12 = fadd <2 x float> %arg, %arg1 diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll index 6509d80..f88b1bf 100644 --- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll +++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll @@ -12,7 +12,7 @@ define amdgpu_kernel void 
@partial_copy(<4 x i32> %arg) #0 { ; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5 ; REGALLOC-GFX908-NEXT: {{ $}} ; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32 - ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def %25 + ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def %25 ; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %25 ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def %27 ; REGALLOC-GFX908-NEXT: SI_SPILL_AV64_SAVE %27, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5) @@ -37,7 +37,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; PEI-GFX908-NEXT: $sgpr12 = S_ADD_U32 $sgpr12, $sgpr9, implicit-def $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15 ; PEI-GFX908-NEXT: $sgpr13 = S_ADDC_U32 $sgpr13, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr12_sgpr13_sgpr14_sgpr15 ; PEI-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0 - ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7798794 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7929866 /* regdef:VReg_128 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; PEI-GFX908-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec ; PEI-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3735562 /* regdef:VReg_64 */, def renamable $vgpr0_vgpr1 ; PEI-GFX908-NEXT: BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr12_sgpr13_sgpr14_sgpr15, 0, 0, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5) @@ -61,7 +61,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5 ; REGALLOC-GFX90A-NEXT: {{ $}} ; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef %6:agpr_32 - ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def %23 + ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def %23 ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %23 ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3997706 /* regdef:VReg_64_Align2 */, def %21 ; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %21 @@ -80,7 +80,7 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 { ; PEI-GFX90A-NEXT: liveins: $sgpr4_sgpr5 ; PEI-GFX90A-NEXT: {{ $}} ; PEI-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, undef renamable $agpr0 - ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 7995402 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 + ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 8257546 /* regdef:VReg_128_Align2 */, def renamable $vgpr0_vgpr1_vgpr2_vgpr3 ; PEI-GFX90A-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, implicit $exec ; PEI-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 
3997706 /* regdef:VReg_64_Align2 */, def renamable $vgpr2_vgpr3 ; PEI-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1) diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll index b717f85..6671201 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll @@ -186,12 +186,12 @@ define amdgpu_kernel void @mixed_inreg_block_count_x(ptr addrspace(1) %out, i32 ; ; GFX1250-LABEL: mixed_inreg_block_count_x: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b32 s2, s[0:1], 0x10 -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x0 +; GFX1250-NEXT: s_clause 0x1 +; GFX1250-NEXT: s_load_b32 s4, s[0:1], 0x10 +; GFX1250-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] ; GFX1250-NEXT: s_endpgm %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() %load = load i32, ptr addrspace(4) %imp_arg_ptr diff --git a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll index 4d367ef..c1764c9 100644 --- a/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll +++ b/llvm/test/CodeGen/AMDGPU/preload-kernargs.ll @@ -346,10 +346,10 @@ define amdgpu_kernel void @byref_preload_arg(ptr addrspace(1) inreg %out, ptr ad ; ; GFX1250-LABEL: byref_preload_arg: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 -; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: v_mov_b32_e32 v2, s5 ; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_storecnt 0x0 ; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS @@ -404,10 +404,10 @@ define amdgpu_kernel void @byref_staggered_preload_arg(ptr addrspace(1) inreg %o ; ; GFX1250-LABEL: byref_staggered_preload_arg: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[0:1], 0x100 +; GFX1250-NEXT: s_load_b64 s[4:5], s[0:1], 0x100 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0 -; GFX1250-NEXT: v_mov_b32_e32 v2, s1 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-NEXT: v_mov_b32_e32 v2, s5 ; GFX1250-NEXT: global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS ; GFX1250-NEXT: s_wait_storecnt 0x0 ; GFX1250-NEXT: global_store_b32 v0, v2, s[2:3] scope:SCOPE_SYS diff --git a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir index b32e997..80afe7a 100644 --- a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir +++ b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir @@ -43,17 +43,17 @@ machineFunctionInfo: body: | bb.0: - INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $agpr0 + INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 2424842 /* regdef:AGPR_32 */, implicit-def $agpr0 %14:vgpr_32 = COPY killed 
$agpr0 - INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 27394058 /* regdef:VReg_512 */, def %7, 13697034 /* regdef:VReg_256 */, def %8, 6225930 /* regdef:VReg_128 */, def %9, 4915210 /* regdef:VReg_96 */, def %10, 4915210 /* regdef:VReg_96 */, def %11 + INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 40042506 /* regdef:VReg_512 */, def %7, 19464202 /* regdef:VReg_256 */, def %8, 7929866 /* regdef:VReg_128 */, def %9, 5963786 /* regdef:VReg_96 */, def %10, 5963786 /* regdef:VReg_96 */, def %11 INLINEASM &"; clobber", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def dead early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, 12 /* clobber */, implicit-def dead early-clobber $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27394057 /* reguse:VReg_512 */, %7 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 13697033 /* reguse:VReg_256 */, %8 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 6225929 /* reguse:VReg_128 */, %9 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %10 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:VReg_96 */, %11 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 40042505 /* reguse:VReg_512 */, %7 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 19464201 /* reguse:VReg_256 */, %8 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 7929865 /* reguse:VReg_128 */, %9 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %10 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5963785 /* reguse:VReg_96 */, %11 $agpr1 = COPY %14 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, killed $agpr1 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2424841 /* reguse:AGPR_32 */, killed $agpr1 SI_RETURN ... 
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir index 1b09f5d..ad490f8 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-copy-from.mir @@ -41,9 +41,9 @@ body: | ; CHECK-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY $vgpr0_vgpr1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY $vgpr2_vgpr3 ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) - ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]] - ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:AV_64_Align2 */, [[V_MFMA_F64_4X4X4F64_e64_]] + ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]] + ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]]:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[COPY3]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]] ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 %1:av_64_align2 = COPY $vgpr0_vgpr1 @@ -51,7 +51,7 @@ body: | %3:areg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_128_align2 = COPY %3 %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3801097 /* reguse:VReg_64_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3997705 /* reguse:VReg_64_Align2 */, %5 SI_RETURN ... 
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir index d7b713a..0b4e662 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-insert-extract.mir @@ -19,7 +19,7 @@ body: | ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -30,7 +30,7 @@ body: | %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -172,7 +172,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]].sub2_sub3:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3 ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -183,7 +183,7 @@ body: | undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4.sub2_sub3 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -208,7 +208,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_vgprcd_e64_:%[0-9]+]].sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX2_]], 0, 0, 0, implicit $mode, implicit $exec ; 
CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_vgprcd_e64_]].sub2 ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub2_sub3:areg_128_align2 = IMPLICIT_DEF - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]].sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN @@ -219,7 +219,7 @@ body: | undef %4.sub2_sub3:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub1:areg_128_align2 = COPY %4.sub2 %5.sub2_sub3 = IMPLICIT_DEF - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) GLOBAL_STORE_DWORDX2 %0, %5.sub2_sub3, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir index 57f611b..dcf3b8b 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr-subreg-src2-chain.mir @@ -17,7 +17,7 @@ body: | ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -26,7 +26,7 @@ body: | %3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... 
@@ -47,7 +47,7 @@ body: | ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:areg_128_align2 = GLOBAL_LOAD_DWORDX4 [[COPY]], 0, 0, implicit $exec :: (load (s128), addrspace 1) ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -56,7 +56,7 @@ body: | %3:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec :: (load (s128), addrspace 1) %4:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %3.sub2_sub3, 0, 0, 0, implicit $mode, implicit $exec undef %5.sub0_sub1:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %5 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %5 GLOBAL_STORE_DWORDX4 %0, %5, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... @@ -79,7 +79,7 @@ body: | ; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1 ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -90,7 +90,7 @@ body: | %other_use:vreg_64_align2 = COPY %4.sub0_sub1 %5:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec %6:areg_64_align2 = COPY %5 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %6:areg_64_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %6:areg_64_align2 GLOBAL_STORE_DWORDX2 %0, %6, 0, 0, implicit $exec :: (store (s64), addrspace 1) SI_RETURN ... 
@@ -114,7 +114,7 @@ body: | ; CHECK-NEXT: undef [[V_MFMA_F64_4X4X4F64_e64_1:%[0-9]+]].sub0_sub1:areg_128_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_]], 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:VS_64_with_sub0_in_VS_32_Lo128 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX2 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s64), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -126,7 +126,7 @@ body: | undef %5.sub0_sub1:vreg_128_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %4, 0, 0, 0, implicit $mode, implicit $exec %6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec %7:areg_64_align2 = COPY %6 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4521993 /* reguse:AReg_64_Align2 */, %7 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_64_Align2 */, %7 GLOBAL_STORE_DWORDX2 %0, %7, 0, 0, implicit $exec :: (store (s64), addrspace 1) SI_RETURN @@ -151,7 +151,7 @@ body: | ; CHECK-NEXT: dead %other_use:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1 ; CHECK-NEXT: [[V_MFMA_F64_4X4X4F64_e64_2:%[0-9]+]]:areg_64_align2 = V_MFMA_F64_4X4X4F64_e64 [[COPY1]], [[COPY2]], [[V_MFMA_F64_4X4X4F64_e64_1]].sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_2]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY $vgpr4_vgpr5 @@ -163,7 +163,7 @@ body: | %other_use:vreg_64_align2 = COPY %5.sub0_sub1 %6:vreg_64_align2 = V_MFMA_F64_4X4X4F64_vgprcd_e64 %1, %2, %5.sub0_sub1, 0, 0, 0, implicit $mode, implicit $exec undef %8.sub0_sub1:areg_128_align2 = COPY %6 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %8:areg_128_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %8:areg_128_align2 GLOBAL_STORE_DWORDX4 %0, %8, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN @@ -231,7 +231,7 @@ body: | ; CHECK-NEXT: dead %other_use1:vreg_64_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub2_sub3 ; CHECK-NEXT: dead %other_use2:vreg_64 = COPY [[V_MFMA_F64_4X4X4F64_e64_]].sub1_sub2 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:areg_128_align2 = COPY [[V_MFMA_F64_4X4X4F64_e64_]] - ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, [[COPY3]] + ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, [[COPY3]] ; CHECK-NEXT: GLOBAL_STORE_DWORDX4 [[COPY]], [[COPY3]], 0, 0, implicit $exec :: (store (s128), addrspace 1) ; CHECK-NEXT: SI_RETURN %0:vreg_64_align2 = COPY 
$vgpr4_vgpr5 @@ -245,7 +245,7 @@ body: | %other_use1:vreg_64_align2 = COPY %4.sub2_sub3 %other_use2:vreg_64 = COPY %4.sub1_sub2 %6:areg_128_align2 = COPY %4 - INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 8650761 /* reguse:AReg_128_Align2 */, %6:areg_128_align2 + INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9568265 /* reguse:AReg_128_Align2 */, %6:areg_128_align2 GLOBAL_STORE_DWORDX4 %0, %6, 0, 0, implicit $exec :: (store (s128), addrspace 1) SI_RETURN ... diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll index 335d58c..a18847b 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll @@ -324,11 +324,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; SDAG: ; %bb.0: ; %entry ; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0 -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1] -; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi -; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3 +; SDAG-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v3 ; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) ; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0 ; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1 @@ -350,10 +348,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0 ; SDAG-NEXT: s_cbranch_execz .LBB21_2 ; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private -; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo ; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3] ; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0 -; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2 +; SDAG-NEXT: v_subrev_nc_u32_e32 v0, src_flat_scratch_base_lo, v2 ; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off @@ -367,12 +364,12 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; ; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom: ; GISEL: ; %bb.0: ; %entry -; GISEL-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v0, src_flat_scratch_base_hi +; GISEL-NEXT: v_mov_b32_e32 v2, v0 ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2 ; GISEL-NEXT: v_lshl_add_u64 v[4:5], v[2:3], 3, s[0:1] ; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GISEL-NEXT: v_xor_b32_e32 v0, v5, v0 +; GISEL-NEXT: v_xor_b32_e32 v0, src_flat_scratch_base_hi, v5 ; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0 ; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo @@ -394,11 +391,10 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg % ; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2 ; GISEL-NEXT: s_cbranch_execz .LBB21_2 ; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private -; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 -; GISEL-NEXT: v_mov_b32_e32 v0, src_flat_scratch_base_lo ; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5] -; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GISEL-NEXT: v_sub_nc_u32_e32 v0, v4, v0 +; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0 +; GISEL-NEXT: v_subrev_nc_u32_e32 v0, 
src_flat_scratch_base_lo, v4 +; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo ; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off ; GISEL-NEXT: s_wait_loadcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll index b5bb68e..e0ea08d 100644 --- a/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll +++ b/llvm/test/CodeGen/AMDGPU/scale-offset-smem.ll @@ -97,9 +97,9 @@ entry: define amdgpu_ps <2 x float> @s_load_b64_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b64_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b64 s[4:5], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -111,10 +111,10 @@ entry: define amdgpu_ps <3 x float> @s_load_b96_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b96_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_mov_b32_e32 v2, s6 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -126,10 +126,10 @@ entry: define amdgpu_ps <4 x float> @s_load_b128_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b128_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -141,12 +141,12 @@ entry: define amdgpu_ps <8 x float> @s_load_b256_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b256_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -158,16 +158,16 @@ entry: define amdgpu_ps <16 x float> @s_load_b512_idxprom(ptr addrspace(4) align 4 inreg %p, i32 inreg %idx) { ; GCN-LABEL: s_load_b512_idxprom: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 offset:0x0 scale_offset ; 
GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 -; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 -; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 -; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 -; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 +; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13 +; GCN-NEXT: v_dual_mov_b32 v10, s14 :: v_dual_mov_b32 v11, s15 +; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17 +; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19 ; GCN-NEXT: ; return to shader part epilog entry: %idxprom = zext i32 %idx to i64 @@ -275,11 +275,11 @@ entry: define amdgpu_ps <2 x float> @s_load_b64_idxprom_range(ptr addrspace(4) align 4 inreg %p) { ; GCN-LABEL: s_load_b64_idxprom_range: ; GCN: ; %bb.0: ; %entry -; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 +; GCN-NEXT: s_load_b32 s4, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b64 s[0:1], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b64 s[2:3], s[0:1], s4 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GCN-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -294,10 +294,10 @@ define amdgpu_ps <3 x float> @s_load_b96_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b96 s[0:2], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b96 s[4:6], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_mov_b32_e32 v2, s2 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_mov_b32_e32 v2, s6 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -312,10 +312,10 @@ define amdgpu_ps <4 x float> @s_load_b128_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b128 s[0:3], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b128 s[4:7], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -330,12 +330,12 @@ define amdgpu_ps <8 x float> @s_load_b256_idxprom_range(ptr addrspace(4) align 4 ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b256 s[0:7], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b256 s[4:11], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: 
v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 @@ -350,16 +350,16 @@ define amdgpu_ps <16 x float> @s_load_b512_idxprom_range(ptr addrspace(4) align ; GCN: ; %bb.0: ; %entry ; GCN-NEXT: s_load_b32 s2, s[0:1], 0x0 ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: s_load_b512 s[0:15], s[0:1], s2 offset:0x0 scale_offset +; GCN-NEXT: s_load_b512 s[4:19], s[0:1], s2 offset:0x0 scale_offset ; GCN-NEXT: s_wait_kmcnt 0x0 -; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GCN-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 -; GCN-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 -; GCN-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7 -; GCN-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9 -; GCN-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11 -; GCN-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13 -; GCN-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15 +; GCN-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GCN-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7 +; GCN-NEXT: v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, s9 +; GCN-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11 +; GCN-NEXT: v_dual_mov_b32 v8, s12 :: v_dual_mov_b32 v9, s13 +; GCN-NEXT: v_dual_mov_b32 v10, s14 :: v_dual_mov_b32 v11, s15 +; GCN-NEXT: v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v13, s17 +; GCN-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s19 ; GCN-NEXT: ; return to shader part epilog entry: %idx = load i32, ptr addrspace(4) %p, align 4, !range !0 diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll index f2ecfe8..3d74b17 100644 --- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll +++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll @@ -17,16 +17,16 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3 ; ; GFX1250-LABEL: v_ashr_pk_i8_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; GFX1250-NEXT: s_and_b32 s2, s2, 31 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_mov_b32_e32 v0, s2 ; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0 -; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5] +; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7] ; GFX1250-NEXT: s_endpgm %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0 %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1 @@ -58,16 +58,16 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3 ; ; GFX1250-LABEL: v_ashr_pk_u8_i32: ; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_clause 0x1 ; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c -; GFX1250-NEXT: s_wait_xcnt 0x0 -; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24 +; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 ; 
GFX1250-NEXT: s_and_b32 s2, s2, 31 ; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1250-NEXT: v_mov_b32_e32 v0, s2 ; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0 -; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5] +; GFX1250-NEXT: global_store_b16 v1, v0, s[6:7] ; GFX1250-NEXT: s_endpgm %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0 %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1 diff --git a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir index af8b9e7..6fe99d8 100644 --- a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir +++ b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir @@ -520,6 +520,7 @@ body: | ; GCN-NEXT: GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr4_vgpr5, 16, 0, implicit $exec ; GCN-NEXT: S_WAIT_KMCNT 0 ; GCN-NEXT: $sgpr2 = S_ADD_I32 $sgpr0, 100, implicit-def $scc + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 20, implicit $exec $sgpr2_sgpr3 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 0, 0 :: (load (s64), addrspace 4) $vgpr0 = V_MOV_B32_e32 1, implicit $exec @@ -921,7 +922,6 @@ body: | $vgpr2 = V_MOV_B32_e32 1, implicit $exec ... -# FIXME: Missing S_WAIT_XCNT before overwriting vgpr0. --- name: wait_kmcnt_with_outstanding_vmem tracksRegLiveness: true @@ -937,6 +937,7 @@ body: | ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec ; GCN-NEXT: S_WAIT_KMCNT 0 ; GCN-NEXT: $sgpr2 = S_MOV_B32 $sgpr2 + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec @@ -944,7 +945,6 @@ body: | $vgpr0 = V_MOV_B32_e32 0, implicit $exec ... -# FIXME: Missing S_WAIT_XCNT before overwriting sgpr0. --- name: wait_loadcnt_with_outstanding_smem tracksRegLiveness: true @@ -960,6 +960,7 @@ body: | ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 ; GCN-NEXT: S_WAIT_LOADCNT 0 ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr2, implicit $exec + ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $sgpr0 = S_MOV_B32 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 @@ -967,7 +968,6 @@ body: | $sgpr0 = S_MOV_B32 0 ... -# TODO: Unnecessary wait before overwriting vgpr0. --- name: overwrite_vgpr_after_smem tracksRegLiveness: true @@ -981,14 +981,12 @@ body: | ; GCN-NEXT: {{ $}} ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 - ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr0 = V_MOV_B32_e32 0, implicit $exec ... -# TODO: Unnecessary wait before overwriting sgpr0. 
--- name: overwrite_sgpr_after_vmem tracksRegLiveness: true @@ -1002,7 +1000,6 @@ body: | ; GCN-NEXT: {{ $}} ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec - ; GCN-NEXT: S_WAIT_XCNT 0 ; GCN-NEXT: $sgpr0 = S_MOV_B32 0 $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll index a392692..6636eb5 100644 --- a/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll +++ b/llvm/test/CodeGen/AMDGPU/workgroup-id-in-arch-sgprs.ll @@ -211,38 +211,39 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac ; GFX1250-SDAG-LABEL: workgroup_id_xyz: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_bfe_u32 s0, ttmp6, 0x40014 -; GFX1250-SDAG-NEXT: s_lshr_b32 s6, ttmp7, 16 -; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s0, 1 +; GFX1250-SDAG-NEXT: s_lshr_b32 s8, ttmp7, 16 +; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s0, 1 +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 +; GFX1250-SDAG-NEXT: s_mul_i32 s4, s8, s9 ; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40010 -; GFX1250-SDAG-NEXT: s_mul_i32 s7, s6, s7 -; GFX1250-SDAG-NEXT: s_bfe_u32 s8, ttmp6, 0x40008 +; GFX1250-SDAG-NEXT: s_bfe_u32 s5, ttmp6, 0x40008 ; GFX1250-SDAG-NEXT: s_and_b32 s10, ttmp7, 0xffff ; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, 1 ; GFX1250-SDAG-NEXT: s_bfe_u32 s11, ttmp6, 0x4000c -; GFX1250-SDAG-NEXT: s_add_co_i32 s8, s8, s7 -; GFX1250-SDAG-NEXT: s_mul_i32 s7, s10, s9 +; GFX1250-SDAG-NEXT: s_add_co_i32 s5, s5, s4 +; GFX1250-SDAG-NEXT: s_mul_i32 s4, s10, s9 ; GFX1250-SDAG-NEXT: s_bfe_u32 s9, ttmp6, 0x40004 ; GFX1250-SDAG-NEXT: s_add_co_i32 s11, s11, 1 -; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s7 -; GFX1250-SDAG-NEXT: s_and_b32 s7, ttmp6, 15 +; GFX1250-SDAG-NEXT: s_add_co_i32 s9, s9, s4 +; GFX1250-SDAG-NEXT: s_and_b32 s4, ttmp6, 15 ; GFX1250-SDAG-NEXT: s_mul_i32 s11, ttmp9, s11 ; GFX1250-SDAG-NEXT: s_getreg_b32 s12, hwreg(HW_REG_IB_STS2, 6, 4) -; GFX1250-SDAG-NEXT: s_add_co_i32 s7, s7, s11 +; GFX1250-SDAG-NEXT: s_add_co_i32 s4, s4, s11 ; GFX1250-SDAG-NEXT: s_cmp_eq_u32 s12, 0 -; GFX1250-SDAG-NEXT: s_cselect_b32 s7, ttmp9, s7 +; GFX1250-SDAG-NEXT: s_cselect_b32 s4, ttmp9, s4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s7 -; GFX1250-SDAG-NEXT: s_cselect_b32 s7, s10, s9 -; GFX1250-SDAG-NEXT: s_cselect_b32 s6, s6, s8 -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4 +; GFX1250-SDAG-NEXT: s_cselect_b32 s4, s10, s9 +; GFX1250-SDAG-NEXT: s_cselect_b32 s5, s8, s5 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x2 ; GFX1250-SDAG-NEXT: global_store_b32 v0, v1, s[0:1] ; GFX1250-SDAG-NEXT: global_store_b32 v0, v2, s[2:3] -; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[4:5] +; GFX1250-SDAG-NEXT: global_store_b32 v0, v3, s[6:7] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: workgroup_id_xyz: @@ -250,39 +251,40 @@ define amdgpu_kernel void @workgroup_id_xyz(ptr addrspace(1) %ptrx, ptr addrspac ; 
GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x4000c ; GFX1250-GISEL-NEXT: s_and_b32 s1, ttmp6, 15 ; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1 -; GFX1250-GISEL-NEXT: s_getreg_b32 s6, hwreg(HW_REG_IB_STS2, 6, 4) +; GFX1250-GISEL-NEXT: s_getreg_b32 s8, hwreg(HW_REG_IB_STS2, 6, 4) ; GFX1250-GISEL-NEXT: s_mul_i32 s0, ttmp9, s0 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0 ; GFX1250-GISEL-NEXT: s_add_co_i32 s1, s1, s0 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: s_cselect_b32 s7, ttmp9, s1 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: s_cselect_b32 s9, ttmp9, s1 ; GFX1250-GISEL-NEXT: s_bfe_u32 s0, ttmp6, 0x40010 -; GFX1250-GISEL-NEXT: s_and_b32 s8, ttmp7, 0xffff +; GFX1250-GISEL-NEXT: s_and_b32 s10, ttmp7, 0xffff ; GFX1250-GISEL-NEXT: s_add_co_i32 s0, s0, 1 -; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40004 -; GFX1250-GISEL-NEXT: s_mul_i32 s10, s8, s0 +; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40004 +; GFX1250-GISEL-NEXT: s_mul_i32 s12, s10, s0 +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0 +; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x10 +; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s12 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s9 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 -; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x10 -; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, s10 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, s7 -; GFX1250-GISEL-NEXT: s_cselect_b32 s8, s8, s9 -; GFX1250-GISEL-NEXT: s_bfe_u32 s9, ttmp6, 0x40014 +; GFX1250-GISEL-NEXT: s_cselect_b32 s4, s10, s11 +; GFX1250-GISEL-NEXT: s_bfe_u32 s5, ttmp6, 0x40014 ; GFX1250-GISEL-NEXT: s_lshr_b32 s10, ttmp7, 16 -; GFX1250-GISEL-NEXT: s_add_co_i32 s9, s9, 1 +; GFX1250-GISEL-NEXT: s_add_co_i32 s5, s5, 1 ; GFX1250-GISEL-NEXT: s_bfe_u32 s11, ttmp6, 0x40008 -; GFX1250-GISEL-NEXT: s_mul_i32 s9, s10, s9 +; GFX1250-GISEL-NEXT: s_mul_i32 s5, s10, s5 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s9 -; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s6, 0 -; GFX1250-GISEL-NEXT: s_cselect_b32 s6, s10, s11 -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v3, s6 +; GFX1250-GISEL-NEXT: s_add_co_i32 s11, s11, s5 +; GFX1250-GISEL-NEXT: s_cmp_eq_u32 s8, 0 +; GFX1250-GISEL-NEXT: s_cselect_b32 s5, s10, s11 +; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x2 ; GFX1250-GISEL-NEXT: global_store_b32 v1, v0, s[0:1] ; GFX1250-GISEL-NEXT: global_store_b32 v1, v2, s[2:3] -; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[4:5] +; GFX1250-GISEL-NEXT: global_store_b32 v1, v3, s[6:7] ; GFX1250-GISEL-NEXT: s_endpgm ; GFX12-LABEL: workgroup_id_xyz: ; GFX12: ; %bb.0: diff --git a/llvm/test/CodeGen/ARM/and-mask-variable.ll b/llvm/test/CodeGen/ARM/and-mask-variable.ll new file mode 100644 index 0000000..0f84b76 --- /dev/null +++ b/llvm/test/CodeGen/ARM/and-mask-variable.ll @@ -0,0 +1,90 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +define i32 @mask_pair(i32 %x, i32 %y) { +; V7M-LABEL: 
mask_pair: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: bx lr + %shl = shl nsw i32 -1, %y + %and = and i32 %shl, %x + ret i32 %and +} + +define i64 @mask_pair_64(i64 %x, i64 %y) { +; V7M-LABEL: mask_pair_64: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: and.w r0, r0, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: mask_pair_64: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: mask_pair_64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: mask_pair_64: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shl = shl nsw i64 -1, %y + %and = and i64 %shl, %x + ret i64 %and +} diff --git a/llvm/test/CodeGen/ARM/extract-bits.ll b/llvm/test/CodeGen/ARM/extract-bits.ll new file mode 100644 index 0000000..77deaa5 --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-bits.ll @@ -0,0 +1,4591 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; Patterns: +; a) (x >> start) & (1 << nbits) - 1 +; b) (x >> start) & ~(-1 << nbits) +; c) (x >> start) & (-1 >> (32 - y)) +; d) (x >> start) << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: asrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, asr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: asrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: asrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = ashr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; 
V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: lsrs r3, r1 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: movs r1, #1 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r2, r3, r12, lsl r2 +; V7A-NEXT: and r0, r2, r0, lsr r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsls r1, r2 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: subs r1, r1, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push 
{r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a0_arithmetic: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl 
+; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl.w r0, r1, r4 +; V7M-NEXT: asr.w r2, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: asrpl r2, r1, #31 +; V7M-NEXT: and.w r1, r12, r2 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a0_arithmetic: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: asr r2, r1, r2 +; V7A-NEXT: asrpl r0, r1, r3 +; V7A-NEXT: asrpl r2, r1, #31 +; V7A-NEXT: and r0, r4, r0 +; V7A-NEXT: and r1, r12, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a0_arithmetic: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl.w r0, r1, r4 +; V7A-T-NEXT: asr.w r2, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: asrpl r2, r1, #31 +; V7A-T-NEXT: and.w r1, r12, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a0_arithmetic: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_lasr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = ashr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r4, r3, #32 +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: subs.w r12, r3, #32 +; V7M-NEXT: lsl.w r3, lr, r3 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: 
rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: subs r4, r3, #32 +; V7A-NEXT: lsl r3, lr, r3 +; V7A-NEXT: lsr r12, lr, r12 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lslpl r12, lr, r4 +; V7A-NEXT: rsb r4, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: subs r3, r3, #1 +; V7A-NEXT: sbc r12, r12, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r4 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r3, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r12, r3, #32 +; V7A-T-NEXT: lsl.w r3, lr, r3 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: lsr.w r0, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: mov r2, r3 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: rsb.w r1, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs.w lr, r3, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r12, r1, #0 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; 
V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r1, [sp, #16] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r3, r1 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r1, r1, #1 +; V7A-NEXT: sbc r3, r0, #0 +; V7A-NEXT: lsr r0, r6, r2 +; V7A-NEXT: rsb r6, r2, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r6 +; V7A-NEXT: subs r6, r2, #32 +; V7A-NEXT: lsrpl r0, r5, r6 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: lsr r1, r5, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: movs r3, #1 +; V7A-T-NEXT: ldrd lr, r1, [r0] +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r0, r12, #32 +; V7A-T-NEXT: lsr.w r4, r3, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, r3, r0 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r3, lr, r2 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r3, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r2, [sp] @ 4-byte Spill +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #24] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r6, r7 +; V6M-NEXT: ldm r5!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r6 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs.w lr, r2, #1 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: sbc r12, r3, #0 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r0, r0, lr +; V7M-NEXT: and.w r1, 
r1, r12 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r6, lr} +; V7A-NEXT: push {r4, r5, r6, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r3, #1 +; V7A-NEXT: ldr r5, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl r0, r3, r4 +; V7A-NEXT: subs r3, r2, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: lsr r2, r5, r1 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r2, r0, r2 +; V7A-NEXT: lsr r0, r6, r1 +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: orr r0, r0, r5, lsl r1 +; V7A-NEXT: mov r1, r2 +; V7A-NEXT: lsrpl r0, r5, r4 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r6, pc} +; +; V7A-T-LABEL: bextr64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: lsl.w r2, lr, r2 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: ldrd r12, r0, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs.w lr, r2, #1 +; V7A-T-NEXT: sbc r2, r4, #0 +; V7A-T-NEXT: lsr.w r4, r0, r1 +; V7A-T-NEXT: subs.w r3, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r4, #0 +; V7A-T-NEXT: and.w r2, r2, r4 +; V7A-T-NEXT: rsb.w r4, r1, #32 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: lsl.w r4, r0, r4 +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r1, r0, r3 +; V7A-T-NEXT: and.w r0, lr, r1 +; V7A-T-NEXT: mov r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #4 +; V6M-NEXT: sub sp, #4 +; V6M-NEXT: str r1, [sp] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: subs r4, r0, #1 +; V6M-NEXT: sbcs r5, r7 +; V6M-NEXT: ldm r6!, {r0, r1} +; V6M-NEXT: ldr r2, [sp] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: add sp, #4 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: mov.w lr, #1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w r4, r12, #32 +; V7M-NEXT: subs.w r3, r12, #32 +; V7M-NEXT: lsr.w r4, lr, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r4, lr, r3 +; V7M-NEXT: lsl.w r3, lr, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: subs r3, #1 +; V7M-NEXT: sbc r12, r4, #0 +; V7M-NEXT: rsb.w r4, r2, #32 +; V7M-NEXT: lsl.w r4, r1, r4 +; V7M-NEXT: orrs r0, r4 +; V7M-NEXT: subs.w r4, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r4 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: and.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: and.w r1, r1, r12 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_a4_commutative: +; V7A: @ %bb.0: +; 
V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r3, r12, #32 +; V7A-NEXT: subs r4, r12, #32 +; V7A-NEXT: lsr r3, lr, r3 +; V7A-NEXT: lslpl r3, lr, r4 +; V7A-NEXT: lsl r4, lr, r12 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs r4, r4, #1 +; V7A-NEXT: sbc r12, r3, #0 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r0, r0, r4 +; V7A-NEXT: and r1, r1, r12 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: rsb.w r4, r12, #32 +; V7A-T-NEXT: subs.w r3, r12, #32 +; V7A-T-NEXT: lsr.w r4, lr, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r4, lr, r3 +; V7A-T-NEXT: lsl.w r3, lr, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: subs r3, #1 +; V7A-T-NEXT: sbc r12, r4, #0 +; V7A-T-NEXT: rsb.w r4, r2, #32 +; V7A-T-NEXT: lsl.w r4, r1, r4 +; V7A-T-NEXT: orrs r0, r4 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, r7, lr} +; V6M-NEXT: push {r4, r5, r6, r7, lr} +; V6M-NEXT: .pad #12 +; V6M-NEXT: sub sp, #12 +; V6M-NEXT: str r2, [sp, #8] @ 4-byte Spill +; V6M-NEXT: str r1, [sp, #4] @ 4-byte Spill +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r7, #0 +; V6M-NEXT: ldr r2, [sp, #32] +; V6M-NEXT: mov r1, r7 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: subs r5, r0, #1 +; V6M-NEXT: sbcs r4, r7 +; V6M-NEXT: mov r0, r6 +; V6M-NEXT: ldr r1, [sp, #4] @ 4-byte Reload +; V6M-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: add sp, #12 +; V6M-NEXT: pop {r4, r5, r6, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
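+; As a reference point, a hedged C sketch of this variant (illustrative only,
+; not part of the autogenerated checks; the helper name is made up and
+; <stdint.h> types are assumed): both the shift and the mask are computed at
+; 64 bits, and only the final result is narrowed to 32 bits.
+;   uint32_t bextr64_32_a0_ref(uint64_t val, uint64_t skip, uint64_t low) {
+;     uint64_t shifted = val >> skip;        // 64-bit shift
+;     uint64_t mask = (1ULL << low) - 1;     // 64-bit mask
+;     return (uint32_t)(mask & shifted);     // truncate last
+;   }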
+define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r1, r2, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: sub r1, r1, #1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
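+; Again as a hedged C sketch (illustrative only, not part of the checks): here
+; the value is truncated immediately after the 64-bit shift, so the mask is
+; both built and applied at 32 bits.
+;   uint32_t bextr64_32_a1_ref(uint64_t val, uint64_t skip, uint32_t low) {
+;     uint32_t shifted = (uint32_t)(val >> skip);  // truncate right after the shift
+;     uint32_t mask = (1U << low) - 1;             // 32-bit mask
+;     return mask & shifted;
+;   }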
+define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a1: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
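+; One more hedged C sketch (illustrative only, not part of the checks): the
+; mask is built at 32 bits, zero-extended, applied at 64 bits, and the result
+; is truncated at the very end.
+;   uint32_t bextr64_32_a2_ref(uint64_t val, uint64_t skip, uint32_t low) {
+;     uint32_t mask32 = (1U << low) - 1;           // 32-bit mask
+;     uint64_t mask = (uint64_t)mask32;            // zero-extend to 64 bits
+;     return (uint32_t)(mask & (val >> skip));     // mask at 64 bits, truncate last
+;   }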
+define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_a2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_a2: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: mov lr, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: add r12, r3, lr, lsl r12 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r12, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_a2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_a2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: 
bextr32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #0 +; V6M-NEXT: mvns r3, r3 +; V6M-NEXT: lsls r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bics r0, r3 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: mvns r1, r1 +; V6M-NEXT: lsls r1, r2 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; 
V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: lsr.w r0, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r3, #32 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r2, r1 +; V7M-NEXT: bic.w r1, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bic.w r0, r12, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: orr r12, r12, r1, lsl r0 +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsrpl r12, r1, r0 +; V7A-NEXT: lsr r0, r1, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r3, #32 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: lsl r3, r2, r3 +; V7A-NEXT: lslpl r2, r2, r1 +; V7A-NEXT: bic r1, r0, r2 +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: bic r0, r12, r3 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r12, r12, r0 +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r1, r0 +; V7A-T-NEXT: lsr.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r3, #32 +; V7A-T-NEXT: lsl.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bic.w r1, r0, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: bic.w r0, r12, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, 
%shifted + ret i64 %masked +} + +define i64 @bextr64_b2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: lsl.w r2, r3, r12 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bics r0, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b3_load_indexzext(ptr %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bextr64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r12, r0, [r0] +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w lr, r0, r3 +; V7M-NEXT: lsr.w r3, r12, r1 +; V7M-NEXT: orr.w r12, r3, lr +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r0, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: it pl +; 
V7M-NEXT: lslpl r3, r1 +; V7M-NEXT: bic.w r1, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bic.w r0, r12, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldm r0, {r0, r3} +; V7A-NEXT: lsr r12, r0, r1 +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: orr r12, r12, r3, lsl r0 +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsrpl r12, r3, r0 +; V7A-NEXT: lsr r0, r3, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: subs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r1 +; V7A-NEXT: bic r1, r0, r3 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r12, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: ldrd r12, r3, [r0] +; V7A-T-NEXT: rsb.w r0, r1, #32 +; V7A-T-NEXT: lsl.w lr, r3, r0 +; V7A-T-NEXT: lsr.w r0, r12, r1 +; V7A-T-NEXT: orr.w r12, r0, lr +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r12, r3, r0 +; V7A-T-NEXT: lsr.w r0, r3, r1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: lsl.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r1 +; V7A-T-NEXT: bic.w r1, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: bic.w r0, r12, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r6, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r6, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: subs.w lr, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsrpl r0, r1, r3 +; V7A-NEXT: lsr r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: subs lr, r12, #32 +; V7A-NEXT: lsl r2, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: lslpl r3, 
r3, lr +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #16] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r5, r0, r3 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: subs.w lr, r12, #32 +; V7A-T-NEXT: lsl.w r0, r3, r12 +; V7A-T-NEXT: itt pl +; V7A-T-NEXT: lslpl.w r3, r3, lr +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs.w r4, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, r1, r4 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bic.w r0, r5, r0 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: ldr r2, [sp, #16] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: bics r5, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. +define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsls r2, r1 +; V7M-NEXT: subs r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: bics r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: lsl r1, r1, r12 +; V7A-NEXT: subs r2, r12, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: bic r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsr.w r12, r0, r2 +; V7A-T-NEXT: rsb.w r0, r2, #32 +; V7A-T-NEXT: ldrb.w r3, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: subs.w r2, r3, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r2, [r1] +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r4, r0 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: pop {r4, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i64 + %notmask = shl nsw i64 -1, %widenumlowbits + %mask = xor i64 %notmask, -1 + %wideres = and i64 %shiftedval, %mask + %res = trunc 
i64 %wideres to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. +define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %truncshiftedval = trunc i64 %shiftedval to i32 + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %res = and i32 %truncshiftedval, %mask + ret i32 %res +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
+define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_b2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldrb.w r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_b2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldrb r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: bic r0, r0, r1, lsl r12 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_b2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldrb.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: lsl.w r1, r1, r12 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_b2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: add r1, sp, #8 +; V6M-NEXT: ldrb r1, [r1] +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: pop {r7, pc} + %shiftedval = lshr i64 %val, %numskipbits + %widenumlowbits = zext i8 %numlowbits to i32 + %notmask = shl nsw i32 -1, %widenumlowbits + %mask = xor i32 %notmask, -1 + %zextmask = zext i32 %mask to i64 + %wideres = and i64 %shiftedval, %zextmask + %res = trunc i64 %wideres to i32 + ret i32 %res +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 
@bextr32_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %shifted + ret i32 %masked +} + +define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %shifted, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; 
V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r4, r5 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsr.w r12, r0, r2 +; V7M-NEXT: rsb.w r0, r2, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r1, r0 +; V7M-NEXT: rsb.w r0, r3, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r4, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r4, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsr r0, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r1 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r0, r3, r4 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: lsr.w lr, r0, r12 +; V7A-T-NEXT: rsb.w r0, r12, #32 +; V7A-T-NEXT: lsl.w r0, r1, r0 +; V7A-T-NEXT: orr.w r4, lr, r0 +; 
V7A-T-NEXT: mvn lr, #31 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r4, r1, r2 +; V7A-T-NEXT: rsb.w r2, r3, #64 +; V7A-T-NEXT: lsr.w r1, r1, r12 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: uxtb.w r12, r2 +; V7A-T-NEXT: uxtab r2, lr, r2 +; V7A-T-NEXT: lsr.w r0, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r1, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r0, r3, r4 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: lsr.w r1, r3, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r8, lr} +; V7A-NEXT: push {r4, r6, r8, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r6, r12, #64 +; V7A-NEXT: ldr r8, [r0] +; V7A-NEXT: mvn r0, #0 +; V7A-NEXT: rsbs r1, r12, #32 +; V7A-NEXT: lsr r6, r0, r6 +; V7A-NEXT: lsr r4, r3, r2 +; V7A-NEXT: lsrpl r0, r0, r1 +; V7A-NEXT: movwpl r6, #0 +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: and r1, r6, r4 +; V7A-NEXT: lsr r6, r8, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r2, r6, r3, lsl r2 +; V7A-NEXT: lsrpl r2, r3, r12 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: pop {r4, r6, r8, pc} +; +; V7A-T-LABEL: bextr64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r1, r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r12, r12, #32 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, r12 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_c2_load: +; V6M: @ %bb.0: 
+; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsl.w r0, r3, r0 +; V7M-NEXT: orr.w r12, r12, r0 +; V7M-NEXT: subs.w r0, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r12, r3, r0 +; V7M-NEXT: rsb.w r0, r2, #64 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: uxtb r0, r0 +; V7M-NEXT: subs.w lr, r0, #32 +; V7M-NEXT: lsr.w r2, r3, r0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: and.w r0, r3, r12 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bextr64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr lr, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl lr, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: uxtab r4, r12, r1 +; V7A-NEXT: lsr r2, r3, r2 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r1, r2, r0 +; V7A-NEXT: lsrpl r3, r3, r4 +; V7A-NEXT: and r0, r3, lr +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bextr64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r7, lr} +; V7A-T-NEXT: push {r4, r5, r7, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r3, r0, #32 +; V7A-T-NEXT: lsl.w r4, lr, r3 +; V7A-T-NEXT: lsr.w r3, r12, r0 +; V7A-T-NEXT: orr.w r5, r3, r4 +; V7A-T-NEXT: mvn r12, #31 +; V7A-T-NEXT: uxtab r1, r12, r1 +; V7A-T-NEXT: lsr.w r0, lr, r0 +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r5, lr, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r4, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: uxtb r2, r1 +; V7A-T-NEXT: uxtab r3, r12, r1 +; V7A-T-NEXT: lsr.w r2, r4, r2 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: and.w r1, r2, r0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r4, r3 +; V7A-T-NEXT: and.w r0, r4, r5 +; V7A-T-NEXT: pop {r4, r5, r7, pc} +; +; V6M-LABEL: bextr64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r2 +; V6M-NEXT: ldr r4, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r4 +; V6M-NEXT: mov r1, r3 +; 
V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r6, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r5 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r6 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %shifted + ret i64 %masked +} + +define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: ldr.w r12, [sp] +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsr.w r3, r2, r3 +; V7M-NEXT: rsbs.w r12, r12, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r2, r2, r12 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r12, [sp, #16] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r5, r1, r2 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r4, r12, #64 +; V7A-NEXT: rsbs lr, r12, #32 +; V7A-NEXT: lsr r4, r3, r4 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: movwpl r5, #0 +; V7A-NEXT: and r12, r5, r4 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: mov r1, r12 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: mov.w lr, #-1 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: rsb.w r2, r12, #64 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r2, lr, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bextr64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: ldr r0, [sp, #16] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %shifted, %mask ; swapped order + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
+define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: rsbs.w r1, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r2, r1 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r3, [sp] +; V7A-NEXT: rsbs r12, r3, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsrpl r3, r3, r12 +; V7A-NEXT: lsr r12, r0, r2 +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r12, r1, lsl r0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: rsbs.w r1, r12, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r1 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: ldr r0, [sp, #8] +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r2, r1, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %shifted + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
+define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %truncshifted + ret i32 %masked +} + +; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit. +; Masking is 64-bit. Then truncation. 
+define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_c2: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_c2: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_c2: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_c2: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %zextmask = zext i32 %mask to i64 + %masked = and i64 %zextmask, %shifted + %truncmasked = trunc i64 %masked to i32 + ret i32 %truncmasked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. 
+; ---------------------------------------------------------------------------- ; + +define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bextr32_d2_load(ptr %w, i32 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r3, #32 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: lsls r0, r2 +; V6M-NEXT: lsrs r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %shifted = lshr i32 %val, %numskipbits + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %shifted, %numhighbits + %masked = lshr i32 
%highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bextr32_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: rsb r1, r2, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: movs r1, #32 +; V6M-NEXT: subs r1, r1, r2 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %skip = zext i8 %numskipbits to i32 + %shifted = lshr i32 %val, %skip + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %shifted, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. + +define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; 
V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: uxtb.w lr, r2 +; V7M-NEXT: subs.w r2, lr, #32 +; V7M-NEXT: lsr.w r12, r0, lr +; V7M-NEXT: rsb.w r0, lr, #32 +; V7M-NEXT: lsl.w r0, r1, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: rsb.w r2, r3, #64 +; V7M-NEXT: lsr.w r1, r1, lr +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: uxtb r12, r2 +; V7A-NEXT: lsr lr, r0, r12 +; V7A-NEXT: rsb r0, r12, #32 +; V7A-NEXT: orr r0, lr, r1, lsl r0 +; V7A-NEXT: mvn lr, #31 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r2, r3, #64 +; V7A-NEXT: lsr r1, r1, r12 +; V7A-NEXT: uxtb r3, r2 +; V7A-NEXT: rsb r4, r3, #32 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: uxtab r2, lr, r2 +; V7A-NEXT: lsr r5, r0, r4 +; V7A-NEXT: orr r1, r5, r1, lsl r3 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r4 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, r7, lr} +; V7A-T-NEXT: push {r4, r5, r6, r7, lr} +; V7A-T-NEXT: uxtb.w r12, r2 
+; V7A-T-NEXT: rsb.w r6, r12, #32 +; V7A-T-NEXT: rsb.w r3, r3, #64 +; V7A-T-NEXT: lsr.w r0, r0, r12 +; V7A-T-NEXT: mvn r7, #31 +; V7A-T-NEXT: uxtab r2, r7, r2 +; V7A-T-NEXT: lsl.w r6, r1, r6 +; V7A-T-NEXT: lsr.w lr, r1, r12 +; V7A-T-NEXT: orrs r0, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w lr, #0 +; V7A-T-NEXT: uxtb r5, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: uxtab r3, r7, r3 +; V7A-T-NEXT: lsl.w r4, lr, r5 +; V7A-T-NEXT: lsr.w r2, r0, r1 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, r7, pc} +; +; V6M-LABEL: bextr64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r3 +; V6M-NEXT: uxtb r2, r2 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bextr64_d2_load(ptr %w, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: orrs r0, r1 +; V7M-NEXT: subs.w r1, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r1 +; V7M-NEXT: rsb.w r1, r12, #64 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: rsb.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsl.w r2, r2, r1 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r2, r2, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, lr +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r12 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: lsr r1, r2, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lsrpl r0, r2, r3 +; 
V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: ldrd r0, r3, [r0] +; V7A-T-NEXT: rsb.w r1, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: orrs r0, r1 +; V7A-T-NEXT: subs.w r1, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r1 +; V7A-T-NEXT: lsr.w r2, r3, r2 +; V7A-T-NEXT: rsb.w r1, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: rsb.w lr, r1, #32 +; V7A-T-NEXT: rsbs.w r3, r12, #32 +; V7A-T-NEXT: lsl.w r2, r2, r1 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r2, r2, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r3 +; V7A-T-NEXT: lsl.w r0, r0, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r2, r1 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: ldr r3, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bextr64_d3_load_indexzext(ptr %w, i8 %numskipbits, i8 %numlowbits) nounwind { +; V7M-LABEL: bextr64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: ldrd r0, lr, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: subs.w r3, r1, #32 +; V7M-NEXT: lsr.w r12, r0, r1 +; V7M-NEXT: rsb.w r0, r1, #32 +; V7M-NEXT: lsr.w r1, lr, r1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: lsl.w r0, lr, r0 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: rsb.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, lr, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: lsls r1, r2 +; V7M-NEXT: sub.w r3, r2, #32 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orrs r1, r4 +; V7M-NEXT: cmp r3, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r3 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: lsl.w r4, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r5, r11, lr} +; V7A-NEXT: push {r4, r5, r11, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: lsr r12, r4, r0 +; V7A-NEXT: rsb r4, r0, #32 +; V7A-NEXT: lsr r0, r3, r0 +; V7A-NEXT: orr r4, r12, r3, lsl r4 +; V7A-NEXT: mvn r12, #31 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsrpl r4, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb lr, r2, #32 +; V7A-NEXT: uxtab r1, r12, r1 +; V7A-NEXT: lsr r5, r4, lr +; V7A-NEXT: orr r3, r5, r0, lsl r2 +; 
V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lsl r0, r4, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lslpl r3, r4, r1 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r3, lsl lr +; V7A-NEXT: lsrpl r0, r3, r1 +; V7A-NEXT: lsr r1, r3, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r4, r5, r11, pc} +; +; V7A-T-LABEL: bextr64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, r5, r6, lr} +; V7A-T-NEXT: push {r4, r5, r6, lr} +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: rsb.w r6, r0, #32 +; V7A-T-NEXT: lsr.w r3, lr, r0 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mvn r4, #31 +; V7A-T-NEXT: lsr.w r0, r12, r0 +; V7A-T-NEXT: uxtab r1, r4, r1 +; V7A-T-NEXT: lsl.w r6, lr, r6 +; V7A-T-NEXT: orrs r0, r6 +; V7A-T-NEXT: cmp r1, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: uxtb r5, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, lr, r1 +; V7A-T-NEXT: rsb.w r1, r5, #32 +; V7A-T-NEXT: lsls r3, r5 +; V7A-T-NEXT: uxtab r2, r4, r2 +; V7A-T-NEXT: lsr.w r6, r0, r1 +; V7A-T-NEXT: orrs r3, r6 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r5 +; V7A-T-NEXT: lsl.w r1, r3, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsr.w r0, r0, r5 +; V7A-T-NEXT: orr.w r0, r0, r1 +; V7A-T-NEXT: lsr.w r1, r3, r5 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r3, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, r5, r6, pc} +; +; V6M-LABEL: bextr64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r2 +; V6M-NEXT: ldr r5, [r0] +; V6M-NEXT: ldr r3, [r0, #4] +; V6M-NEXT: uxtb r2, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r2, r2, r4 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %skip = zext i8 %numskipbits to i64 + %shifted = lshr i64 %val, %skip + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %shifted, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; 64-bit, but with 32-bit output + +; Everything done in 64-bit, truncation happens last. 
+define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r4, lr} +; V7M-NEXT: push {r4, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: ldr.w r12, [sp, #8] +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orrs r0, r3 +; V7M-NEXT: subs.w r3, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r3 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: rsb.w r3, r12, #64 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: rsb.w lr, r12, #32 +; V7M-NEXT: rsb.w r12, r3, #32 +; V7M-NEXT: lsls r1, r3 +; V7M-NEXT: cmp.w lr, #0 +; V7M-NEXT: lsr.w r4, r0, r12 +; V7M-NEXT: orr.w r1, r1, r4 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, lr +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r2, r1, r12 +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, lr +; V7M-NEXT: pop {r4, pc} +; +; V7A-LABEL: bextr64_32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: lsr r3, r1, r2 +; V7A-NEXT: subs lr, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: rsb r2, r2, #32 +; V7A-NEXT: ldr r12, [sp, #8] +; V7A-NEXT: movwpl r3, #0 +; V7A-NEXT: orr r0, r0, r1, lsl r2 +; V7A-NEXT: lsrpl r0, r1, lr +; V7A-NEXT: rsb r1, r12, #64 +; V7A-NEXT: rsb lr, r1, #32 +; V7A-NEXT: lsr r2, r0, lr +; V7A-NEXT: orr r2, r2, r3, lsl r1 +; V7A-NEXT: rsbs r3, r12, #32 +; V7A-NEXT: lslpl r2, r0, r3 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: orr r0, r0, r2, lsl lr +; V7A-NEXT: lsrpl r0, r2, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bextr64_32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: ldr.w r12, [sp, #8] +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orrs r0, r3 +; V7A-T-NEXT: subs.w r3, r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r3 +; V7A-T-NEXT: lsr.w r1, r1, r2 +; V7A-T-NEXT: rsb.w r3, r12, #64 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsls r1, r3 +; V7A-T-NEXT: rsbs.w r2, r12, #32 +; V7A-T-NEXT: lsr.w r4, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bextr64_32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r2, [sp, #8] +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %shifted = lshr i64 %val, %numskipbits + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %shifted, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + %res = trunc i64 %masked to i32 + ret i32 %res +} + +; Shifting happens in 64-bit, then truncation. Masking is 32-bit. 
+define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind { +; V7M-LABEL: bextr64_32_d1: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsrs r0, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: ldr r1, [sp] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bextr64_32_d1: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: ldr r12, [sp] +; V7A-NEXT: subs r2, r2, #32 +; V7A-NEXT: orr r0, r0, r1, lsl r3 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: rsb r1, r12, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bextr64_32_d1: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: lsrs r0, r2 +; V7A-T-NEXT: ldr.w r12, [sp] +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: lsl.w r3, r1, r3 +; V7A-T-NEXT: orr.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: rsb.w r1, r12, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bextr64_32_d1: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r7, lr} +; V6M-NEXT: push {r7, lr} +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldr r1, [sp, #8] +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: pop {r7, pc} + %shifted = lshr i64 %val, %numskipbits + %truncshifted = trunc i64 %shifted to i32 + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %truncshifted, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant +; ---------------------------------------------------------------------------- ; + +; https://bugs.llvm.org/show_bug.cgi?id=38938 +define void @pr38938(ptr %a0, ptr %a1) nounwind { +; V7M-LABEL: pr38938: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r1, [r1] +; V7M-NEXT: ubfx r1, r1, #21, #10 +; V7M-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7M-NEXT: adds r2, #1 +; V7M-NEXT: str.w r2, [r0, r1, lsl #2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: pr38938: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r1, [r1] +; V7A-NEXT: ubfx r1, r1, #21, #10 +; V7A-NEXT: ldr r2, [r0, r1, lsl #2] +; V7A-NEXT: add r2, r2, #1 +; V7A-NEXT: str r2, [r0, r1, lsl #2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: pr38938: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r1, [r1] +; V7A-T-NEXT: ubfx r1, r1, #21, #10 +; V7A-T-NEXT: ldr.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: adds r2, #1 +; V7A-T-NEXT: str.w r2, [r0, r1, lsl #2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: pr38938: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r1] +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r2, .LCPI51_0 +; V6M-NEXT: ands r2, r1 +; V6M-NEXT: ldr r1, [r0, r2] +; V6M-NEXT: adds r1, r1, #1 +; V6M-NEXT: str r1, [r0, r2] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI51_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp = load i64, ptr %a1, align 8 + %tmp1 = lshr i64 %tmp, 21 + %tmp2 = and i64 %tmp1, 1023 + %tmp3 = getelementptr inbounds i32, ptr %a0, i64 %tmp2 + %tmp4 = load i32, ptr %tmp3, align 4 + %tmp5 = add nsw i32 %tmp4, 1 + store i32 %tmp5, ptr %tmp3, align 4 + ret void +} + +; The most canonical variant +define i32 @c0_i32(i32 %arg) nounwind { +; V7M-LABEL: c0_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i32: +; V7A: @ %bb.0: +; 
V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + ret i32 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i32 @c1_i32(i32 %arg) nounwind { +; V7M-LABEL: c1_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #19 +; V6M-NEXT: ldr r0, .LCPI53_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI53_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4092 + ret i32 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i32 @c2_i32(i32 %arg) nounwind { +; V7M-LABEL: c2_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r1, #4092 +; V7M-NEXT: and.w r0, r1, r0, lsr #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r1, #4092 +; V7A-NEXT: and r0, r1, r0, lsr #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r1, #4092 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r0, #17 +; V6M-NEXT: ldr r0, .LCPI54_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI54_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + ret i32 %tmp2 +} + +; The mask covers newly shifted-in bit +define i32 @c4_i32_bad(i32 %arg) nounwind { +; V7M-LABEL: c4_i32_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r1, #1 +; V7M-NEXT: and.w r0, r1, r0, lsr #19 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i32_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r1, #1 +; V7A-NEXT: and r0, r1, r0, lsr #19 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i32_bad: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mvn r1, #1 +; V7A-T-NEXT: and.w r0, r1, r0, lsr #19 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i32_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 16382 + ret i32 %tmp1 +} + +; i64 + +; The most canonical variant +define i64 @c0_i64(i64 %arg) nounwind { +; V7M-LABEL: c0_i64: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r1, #19, #10 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c0_i64: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c0_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r1, #19, #10 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c0_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r1, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + ret i64 %tmp1 +} + +; Should be still fine, but the mask is shifted +define i64 @c1_i64(i64 %arg) nounwind { +; V7M-LABEL: c1_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, 
lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c1_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c1_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c1_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #19 +; V6M-NEXT: ldr r0, .LCPI57_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI57_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4092 + ret i64 %tmp1 +} + +; Should be still fine, but the result is shifted left afterwards +define i64 @c2_i64(i64 %arg) nounwind { +; V7M-LABEL: c2_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movw r0, #4092 +; V7M-NEXT: and.w r0, r0, r1, lsr #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c2_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c2_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r0, #4092 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c2_i64: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r0, .LCPI58_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI58_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + ret i64 %tmp2 +} + +; The mask covers newly shifted-in bit +define i64 @c4_i64_bad(i64 %arg) nounwind { +; V7M-LABEL: c4_i64_bad: +; V7M: @ %bb.0: +; V7M-NEXT: mvn r0, #1 +; V7M-NEXT: and.w r0, r0, r1, lsr #19 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: c4_i64_bad: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r0, #1 +; V7A-NEXT: and r0, r0, r1, lsr #19 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c4_i64_bad: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mvn r0, #1 +; V7A-T-NEXT: and.w r0, r0, r1, lsr #19 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c4_i64_bad: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r1, #20 +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 16382 + ret i64 %tmp1 +} + +; ---------------------------------------------------------------------------- ; +; Constant, storing the result afterwards. 
+; ---------------------------------------------------------------------------- ; + +; i32 + +; The most canonical variant +define void @c5_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c5_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #10 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #10 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #10 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #3 +; V6M-NEXT: lsrs r0, r0, #22 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c6_i32: +; V7M: @ %bb.0: +; V7M-NEXT: ubfx r0, r0, #19, #12 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i32: +; V7A: @ %bb.0: +; V7A-NEXT: ubfx r0, r0, #19, #12 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ubfx r0, r0, #19, #12 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsls r0, r0, #1 +; V6M-NEXT: lsrs r0, r0, #20 +; V6M-NEXT: str r0, [r1] +; V6M-NEXT: bx lr + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 4095 + store i32 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i32(i32 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c7_i32: +; V7M: @ %bb.0: +; V7M-NEXT: movw r2, #4092 +; V7M-NEXT: and.w r0, r2, r0, lsr #17 +; V7M-NEXT: str r0, [r1] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i32: +; V7A: @ %bb.0: +; V7A-NEXT: movw r2, #4092 +; V7A-NEXT: and r0, r2, r0, lsr #17 +; V7A-NEXT: str r0, [r1] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movw r2, #4092 +; V7A-T-NEXT: and.w r0, r2, r0, lsr #17 +; V7A-T-NEXT: str r0, [r1] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i32: +; V6M: @ %bb.0: +; V6M-NEXT: lsrs r0, r0, #17 +; V6M-NEXT: ldr r2, .LCPI62_0 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: str r2, [r1] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI62_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i32 %arg, 19 + %tmp1 = and i32 %tmp0, 1023 + %tmp2 = shl i32 %tmp1, 2 + store i32 %tmp2, ptr %ptr + ret void +} + +; i64 + +; The most canonical variant +define void @c5_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c5_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: ubfx r1, r1, #19, #10 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c5_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #10 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c5_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #10 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c5_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #3 +; V6M-NEXT: lsrs r1, r1, #22 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the mask is shifted +define void @c6_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c6_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, 
#0 +; V7M-NEXT: ubfx r1, r1, #19, #12 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c6_i64: +; V7A: @ %bb.0: +; V7A-NEXT: mov r0, #0 +; V7A-NEXT: str r0, [r2, #4] +; V7A-NEXT: ubfx r0, r1, #19, #12 +; V7A-NEXT: str r0, [r2] +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c6_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: ubfx r1, r1, #19, #12 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c6_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsls r1, r1, #1 +; V6M-NEXT: lsrs r1, r1, #20 +; V6M-NEXT: str r1, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 4095 + store i64 %tmp1, ptr %ptr + ret void +} + +; Should be still fine, but the result is shifted left afterwards +define void @c7_i64(i64 %arg, ptr %ptr) nounwind { +; V7M-LABEL: c7_i64: +; V7M: @ %bb.0: +; V7M-NEXT: movs r0, #0 +; V7M-NEXT: movw r3, #4092 +; V7M-NEXT: and.w r1, r3, r1, lsr #17 +; V7M-NEXT: strd r1, r0, [r2] +; V7M-NEXT: bx lr +; +; V7A-LABEL: c7_i64: +; V7A: @ %bb.0: +; V7A-NEXT: movw r0, #4092 +; V7A-NEXT: mov r3, #0 +; V7A-NEXT: and r0, r0, r1, lsr #17 +; V7A-NEXT: stm r2, {r0, r3} +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: c7_i64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r0, #0 +; V7A-T-NEXT: movw r3, #4092 +; V7A-T-NEXT: and.w r1, r3, r1, lsr #17 +; V7A-T-NEXT: strd r1, r0, [r2] +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: c7_i64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: lsrs r1, r1, #17 +; V6M-NEXT: ldr r3, .LCPI65_0 +; V6M-NEXT: ands r3, r1 +; V6M-NEXT: str r3, [r2] +; V6M-NEXT: str r0, [r2, #4] +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI65_0: +; V6M-NEXT: .long 4092 @ 0xffc + %tmp0 = lshr i64 %arg, 51 + %tmp1 = and i64 %tmp0, 1023 + %tmp2 = shl i64 %tmp1, 2 + store i64 %tmp2, ptr %ptr + ret void +} diff --git a/llvm/test/CodeGen/ARM/extract-lowbits.ll b/llvm/test/CodeGen/ARM/extract-lowbits.ll new file mode 100644 index 0000000..b483793 --- /dev/null +++ b/llvm/test/CodeGen/ARM/extract-lowbits.ll @@ -0,0 +1,2752 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefix V7M +; RUN: llc -mtriple=armv7a-eabi %s -o - | FileCheck %s --check-prefix V7A +; RUN: llc -mtriple=thumbv7a-eabi %s -o - | FileCheck %s --check-prefix V7A-T +; RUN: llc -mtriple=armv6m-eabi %s -o - | FileCheck %s --check-prefix V6M + +; Patterns: +; a) x & (1 << nbits) - 1 +; b) x & ~(-1 << nbits) +; c) x & (-1 >> (32 - y)) +; d) x << (32 - y) >> (32 - y) +; are equivalent. + +; ---------------------------------------------------------------------------- ; +; Pattern a. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a0: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a0: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r1, r0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: 
movs r2, #1 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %onebit = shl i32 1, %conv + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: movs r2, #1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: subs r1, #1 +; V7M-NEXT: ands r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: add r1, r3, r2, lsl r1 +; V7A-NEXT: and r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: subs r1, #1 +; V7A-T-NEXT: ands r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #1 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: subs r1, r2, #1 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %onebit = shl i32 1, %numlowbits + %mask = add nsw i32 %onebit, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +; Check that we don't throw 
away the vreg_width-1 mask if not using shifts +define i64 @bzhi64_a0_masked(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a0_masked: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: and r2, r2, #63 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a0_masked: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: and r2, r2, #63 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a0_masked: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: and r2, r2, #63 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a0_masked: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #63 +; V6M-NEXT: ands r2, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %numlowbits.masked = and i64 %numlowbits, 63 + %onebit = shl i64 1, %numlowbits.masked + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r2, r0 +; V7A-NEXT: and r1, r3, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, 
r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r2, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r2, r3, r2 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: sbc r1, r1, #0 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_a2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r1, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r2, #32 +; V7A-NEXT: subs r4, r2, #32 +; V7A-NEXT: lsr r0, r1, r0 +; V7A-NEXT: lslpl r0, r1, r4 +; V7A-NEXT: lsl r1, r1, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: movs r1, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r2, #32 +; V7A-T-NEXT: lsr.w r3, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r1, r0 +; V7A-T-NEXT: lsl.w r0, r1, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r1, #32 +; V7M-NEXT: movs r3, #1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r1, r3, r1 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: subs r3, r1, #1 +; V7M-NEXT: sbc r1, r2, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr 
+; +; V7A-LABEL: bzhi64_a3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: mov r2, #1 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: rsb r0, r1, #32 +; V7A-NEXT: subs r4, r1, #32 +; V7A-NEXT: lsl r1, r2, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: lslpl r0, r2, r4 +; V7A-NEXT: subs r2, r1, #1 +; V7A-NEXT: sbc r0, r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_a3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r1, #32 +; V7A-T-NEXT: movs r2, #1 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: subs.w r0, r1, #32 +; V7A-T-NEXT: lsr.w r3, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r2, r0 +; V7A-T-NEXT: lsl.w r0, r2, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: subs r0, #1 +; V7A-T-NEXT: sbc r1, r3, #0 +; V7A-T-NEXT: and.w r0, r0, r12 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r5, #0 +; V6M-NEXT: mov r1, r5 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: subs r2, r0, #1 +; V6M-NEXT: sbcs r1, r5 +; V6M-NEXT: ldm r4!, {r0, r3} +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: pop {r4, r5, r7, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %onebit = shl i64 1, %conv + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_a4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: mov.w r12, #1 +; V7M-NEXT: subs.w lr, r2, #32 +; V7M-NEXT: lsl.w r2, r12, r2 +; V7M-NEXT: lsr.w r3, r12, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r12, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: subs r2, #1 +; V7M-NEXT: sbc r3, r3, #0 +; V7M-NEXT: ands r0, r2 +; V7M-NEXT: ands r1, r3 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_a4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: mov r12, #1 +; V7A-NEXT: lsr lr, r12, r3 +; V7A-NEXT: subs r3, r2, #32 +; V7A-NEXT: lsl r2, r12, r2 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: lslpl lr, r12, r3 +; V7A-NEXT: subs r2, r2, #1 +; V7A-NEXT: sbc r3, lr, #0 +; V7A-NEXT: and r0, r0, r2 +; V7A-NEXT: and r1, r1, r3 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_a4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #32 +; V7A-T-NEXT: mov.w r12, #1 +; V7A-T-NEXT: subs.w lr, r2, #32 +; V7A-T-NEXT: lsl.w r2, r12, r2 +; V7A-T-NEXT: lsr.w r3, r12, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r3, r12, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: subs r2, #1 +; V7A-T-NEXT: sbc r3, r3, #0 +; V7A-T-NEXT: ands r0, r2 +; V7A-T-NEXT: ands r1, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_a4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r6, lr} +; V6M-NEXT: push {r4, r5, r6, lr} +; V6M-NEXT: mov r5, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #1 +; V6M-NEXT: movs r6, #0 +; V6M-NEXT: mov r1, r6 +; V6M-NEXT: bl __aeabi_llsl 
+; V6M-NEXT: subs r0, r0, #1 +; V6M-NEXT: sbcs r1, r6 +; V6M-NEXT: ands r1, r5 +; V6M-NEXT: ands r0, r4 +; V6M-NEXT: pop {r4, r5, r6, pc} + %onebit = shl i64 1, %numlowbits + %mask = add nsw i64 %onebit, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern b. 32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_b0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b0: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; 
V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %conv = zext i8 %numlowbits to i32 + %notmask = shl i32 -1, %conv + %mask = xor i32 %notmask, -1 + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_b4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: lsl.w r1, r2, r1 +; V7M-NEXT: bics r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: bic r0, r0, r2, lsl r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: lsl.w r1, r2, r1 +; V7A-T-NEXT: bics r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #0 +; V6M-NEXT: mvns r2, r2 +; V6M-NEXT: lsls r2, r1 +; V6M-NEXT: bics r0, r2 +; V6M-NEXT: bx lr + %notmask = shl i32 -1, %numlowbits + %mask = xor i32 %notmask, -1 + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_b0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b0: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b0: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; 
V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b2_load: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r1, #-1 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsl.w r3, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r1, r12 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bic.w r1, r2, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, lr} +; V7A-NEXT: push {r4, lr} +; V7A-NEXT: ldr r4, [r0] +; V7A-NEXT: mvn r1, #0 +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r2, #32 +; V7A-NEXT: lsl r2, r1, r2 +; V7A-NEXT: lslpl r1, r1, r0 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r3, r1 +; V7A-NEXT: bic r0, r4, r2 +; V7A-NEXT: pop {r4, pc} +; +; V7A-T-LABEL: bzhi64_b2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r1, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r1, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r1, r2 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b3_load_indexzext(ptr %w, i8 zeroext %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r2, #-1 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: lsl.w r3, r2, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r3, #0 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r2, r12 +; V7M-NEXT: bics r1, r2 +; V7M-NEXT: bics r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: subs r0, r1, #32 +; V7A-NEXT: lsl r4, r2, r1 +; V7A-NEXT: lslpl r2, r2, r0 +; V7A-NEXT: movwpl r4, #0 +; V7A-NEXT: bic r1, r3, r2 +; V7A-NEXT: bic r0, r6, r4 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_b3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: ldrd r0, r12, [r0] +; V7A-T-NEXT: lsl.w r3, r2, r1 +; V7A-T-NEXT: subs r1, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl 
r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r2, r1 +; V7A-T-NEXT: bics r0, r3 +; V7A-T-NEXT: bic.w r1, r12, r2 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r2, r1 +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: bics r2, r0 +; V6M-NEXT: bics r3, r1 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %conv = zext i8 %numlowbits to i64 + %notmask = shl i64 -1, %conv + %mask = xor i64 %notmask, -1 + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_b4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_b4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsl.w r12, r3, r2 +; V7M-NEXT: subs r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: movpl.w r12, #0 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl r3, r2 +; V7M-NEXT: bic.w r0, r0, r12 +; V7M-NEXT: bics r1, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_b4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: subs r12, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsl r2, r3, r2 +; V7A-NEXT: lslpl r3, r3, r12 +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: bic r1, r1, r3 +; V7A-NEXT: bic r0, r0, r2 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_b4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsl.w r12, r3, r2 +; V7A-T-NEXT: subs r2, #32 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl r3, r2 +; V7A-T-NEXT: bic.w r0, r0, r12 +; V7A-T-NEXT: bics r1, r3 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_b4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: bics r5, r0 +; V6M-NEXT: bics r4, r1 +; V6M-NEXT: mov r0, r5 +; V6M-NEXT: mov r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %notmask = shl i64 -1, %numlowbits + %mask = xor i64 %notmask, -1 + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern c. 
32-bit +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_c0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: 
lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %mask = lshr i32 -1, %sh_prom + %masked = and i32 %mask, %val + ret i32 %masked +} + +define i32 @bzhi32_c4_commutative(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %mask = lshr i32 -1, %numhighbits + %masked = and i32 %val, %mask ; swapped order + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_c0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: and r1, r2, r1 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: lsr.w r2, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, r12 +; V7M-NEXT: it pl +; 
V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: uxtb r12, lr +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: lsr r12, r3, r12 +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: movwpl r12, #0 +; V7A-NEXT: lsrpl r3, r3, r2 +; V7A-NEXT: and r1, r12, r1 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w lr, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: uxtb.w r12, lr +; V7A-T-NEXT: uxtab r2, r2, lr +; V7A-T-NEXT: lsr.w r12, r3, r12 +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl.w r12, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r2 +; V7A-T-NEXT: and.w r1, r1, r12 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r2 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c2_load: +; V7M: @ %bb.0: +; V7M-NEXT: rsbs.w r1, r2, #32 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r1 +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, lr} +; V7A-NEXT: push {r5, lr} +; V7A-NEXT: rsbs r1, r2, #32 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: ldm r0, {r0, r5} +; V7A-NEXT: lsrpl r3, r3, r1 +; V7A-NEXT: rsb r1, r2, #64 +; V7A-NEXT: and r0, r3, r0 +; V7A-NEXT: lsr r1, r12, r1 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: and r1, r1, r5 +; V7A-NEXT: pop {r5, pc} +; +; V7A-T-LABEL: bzhi64_c2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w r1, r2, #32 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: ldrd r0, lr, [r0] +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r3, r1 +; V7A-T-NEXT: rsb.w r1, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: and.w r0, r0, r3 +; V7A-T-NEXT: lsr.w r1, r12, r1 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: and.w r1, r1, lr +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %mask, %val + ret 
i64 %masked +} + +define i64 @bzhi64_c3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: subs.w r2, r1, #32 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl r3, r2 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_c3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r4, r6, r11, lr} +; V7A-NEXT: push {r4, r6, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: mvn r4, #31 +; V7A-NEXT: mvn r2, #0 +; V7A-NEXT: ldr r6, [r0] +; V7A-NEXT: ldr r3, [r0, #4] +; V7A-NEXT: uxtb r0, r1 +; V7A-NEXT: uxtab r4, r4, r1 +; V7A-NEXT: lsr r0, r2, r0 +; V7A-NEXT: cmp r4, #0 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: and r1, r0, r3 +; V7A-NEXT: lsrpl r2, r2, r4 +; V7A-NEXT: and r0, r2, r6 +; V7A-NEXT: pop {r4, r6, r11, pc} +; +; V7A-T-LABEL: bzhi64_c3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r1, r1, #64 +; V7A-T-NEXT: mvn r3, #31 +; V7A-T-NEXT: ldrd r12, lr, [r0] +; V7A-T-NEXT: mov.w r2, #-1 +; V7A-T-NEXT: uxtb r0, r1 +; V7A-T-NEXT: uxtab r3, r3, r1 +; V7A-T-NEXT: lsr.w r0, r2, r0 +; V7A-T-NEXT: cmp r3, #0 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: and.w r1, r0, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl r2, r3 +; V7A-T-NEXT: and.w r0, r2, r12 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_c3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: mov r4, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r0, r0, r1 +; V6M-NEXT: uxtb r2, r0 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ldm r4!, {r2, r3} +; V6M-NEXT: ands r0, r2 +; V6M-NEXT: ands r1, r3 +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %mask = lshr i64 -1, %sh_prom + %masked = and i64 %mask, %val + ret i64 %masked +} + +define i64 @bzhi64_c4_commutative(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_c4_commutative: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsbs.w lr, r2, #32 +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: mov.w r12, #-1 +; V7M-NEXT: mov.w r3, #-1 +; V7M-NEXT: lsr.w r2, r12, r2 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r3, r3, lr +; V7M-NEXT: it pl +; V7M-NEXT: movpl r2, #0 +; V7M-NEXT: ands r0, r3 +; V7M-NEXT: ands r1, r2 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_c4_commutative: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsbs lr, r2, #32 +; V7A-NEXT: rsb r2, r2, #64 +; V7A-NEXT: mvn r12, #0 +; V7A-NEXT: mvn r3, #0 +; V7A-NEXT: lsr r2, r12, r2 +; V7A-NEXT: lsrpl r3, r3, lr +; V7A-NEXT: movwpl r2, #0 +; V7A-NEXT: and r0, r0, r3 +; V7A-NEXT: and r1, r1, r2 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_c4_commutative: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsbs.w lr, r2, #32 +; V7A-T-NEXT: rsb.w r2, r2, #64 +; V7A-T-NEXT: mov.w r12, #-1 +; V7A-T-NEXT: mov.w r3, #-1 +; V7A-T-NEXT: lsr.w r2, r12, r2 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r3, r3, lr +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r2, #0 +; V7A-T-NEXT: ands r0, r3 +; V7A-T-NEXT: ands r1, r2 +; V7A-T-NEXT: pop {r7, pc} +; +; 
V6M-LABEL: bzhi64_c4_commutative: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, r5, r7, lr} +; V6M-NEXT: push {r4, r5, r7, lr} +; V6M-NEXT: mov r4, r1 +; V6M-NEXT: mov r5, r0 +; V6M-NEXT: movs r0, #64 +; V6M-NEXT: subs r2, r0, r2 +; V6M-NEXT: movs r0, #0 +; V6M-NEXT: mvns r0, r0 +; V6M-NEXT: mov r1, r0 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: ands r0, r5 +; V6M-NEXT: ands r1, r4 +; V6M-NEXT: pop {r4, r5, r7, pc} + %numhighbits = sub i64 64, %numlowbits + %mask = lshr i64 -1, %numhighbits + %masked = and i64 %val, %mask ; swapped order + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Pattern d. 32-bit. +; ---------------------------------------------------------------------------- ; + +define i32 @bzhi32_d0(i32 %val, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d0: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d0: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d0: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +define i32 @bzhi32_d2_load(ptr %w, i32 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i32 32, %numlowbits + %highbitscleared = shl i32 %val, %numhighbits + %masked = lshr 
i32 %highbitscleared, %numhighbits + ret i32 %masked +} + +define i32 @bzhi32_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi32_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #32 +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: lsls r0, r1 +; V7M-NEXT: lsrs r0, r1 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: rsb r1, r1, #32 +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: uxtb r1, r1 +; V7A-NEXT: lsl r0, r0, r1 +; V7A-NEXT: lsr r0, r0, r1 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: rsb.w r1, r1, #32 +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: uxtb r1, r1 +; V7A-T-NEXT: lsls r0, r1 +; V7A-T-NEXT: lsrs r0, r1 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #32 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r1, r1 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: lsls r0, r1 +; V6M-NEXT: lsrs r0, r1 +; V6M-NEXT: bx lr + %val = load i32, ptr %w + %numhighbits = sub i8 32, %numlowbits + %sh_prom = zext i8 %numhighbits to i32 + %highbitscleared = shl i32 %val, %sh_prom + %masked = lshr i32 %highbitscleared, %sh_prom + ret i32 %masked +} + +; 64-bit. + +define i64 @bzhi64_d0(i64 %val, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d0: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r3, r2, #64 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: rsb.w lr, r3, #32 +; V7M-NEXT: lsl.w r12, r1, r3 +; V7M-NEXT: lsr.w r1, r0, lr +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r1, lr +; V7M-NEXT: lsr.w r0, r0, r3 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r2 +; V7M-NEXT: lsr.w r1, r1, r3 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d0: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: rsb r12, lr, #32 +; V7A-NEXT: lsr r3, r0, r12 +; V7A-NEXT: orr r1, r3, r1, lsl lr +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, lr +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, lr +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, lr +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d0: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d0: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r4, r3, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, 
%numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d1_indexzext(i64 %val, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d1_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r2, r2, #64 +; V7M-NEXT: uxtb r2, r2 +; V7M-NEXT: rsb.w r3, r2, #32 +; V7M-NEXT: lsl.w r12, r1, r2 +; V7M-NEXT: lsr.w r1, r0, r3 +; V7M-NEXT: orr.w r1, r1, r12 +; V7M-NEXT: subs.w r12, r2, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r1, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r1, r3 +; V7M-NEXT: lsr.w r0, r0, r2 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r1, r12 +; V7M-NEXT: lsr.w r1, r1, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d1_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r11, lr} +; V7A-NEXT: push {r11, lr} +; V7A-NEXT: rsb lr, r2, #64 +; V7A-NEXT: uxtb r3, lr +; V7A-NEXT: rsb r12, r3, #32 +; V7A-NEXT: lsr r2, r0, r12 +; V7A-NEXT: orr r1, r2, r1, lsl r3 +; V7A-NEXT: mvn r2, #31 +; V7A-NEXT: uxtab r2, r2, lr +; V7A-NEXT: cmp r2, #0 +; V7A-NEXT: lslpl r1, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r1, lsl r12 +; V7A-NEXT: lsrpl r0, r1, r2 +; V7A-NEXT: lsr r1, r1, r3 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r11, pc} +; +; V7A-T-LABEL: bzhi64_d1_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r2, #64 +; V7A-T-NEXT: mvn r2, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: uxtab r2, r2, r4 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: cmp r2, #0 +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d1_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r3, #64 +; V6M-NEXT: subs r2, r3, r2 +; V6M-NEXT: uxtb r4, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +define i64 @bzhi64_d2_load(ptr %w, i64 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d2_load: +; V7M: @ %bb.0: +; V7M-NEXT: .save {r7, lr} +; V7M-NEXT: push {r7, lr} +; V7M-NEXT: rsb.w r1, r2, #64 +; V7M-NEXT: ldrd r0, r3, [r0] +; V7M-NEXT: rsb.w lr, r1, #32 +; V7M-NEXT: rsbs.w r2, r2, #32 +; V7M-NEXT: lsl.w r12, r3, r1 +; V7M-NEXT: lsr.w r3, r0, lr +; V7M-NEXT: orr.w r3, r3, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r3, r0, r2 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r12, r3, lr +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r3, r1 +; V7M-NEXT: orr.w r0, r0, r12 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r3, r2 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: pop {r7, pc} +; +; V7A-LABEL: bzhi64_d2_load: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push 
{r5, r7, r11, lr} +; V7A-NEXT: rsb r3, r2, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: rsb r1, r3, #32 +; V7A-NEXT: rsbs r2, r2, #32 +; V7A-NEXT: lsr r5, r0, r1 +; V7A-NEXT: orr r7, r5, r7, lsl r3 +; V7A-NEXT: lslpl r7, r0, r2 +; V7A-NEXT: lsl r0, r0, r3 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r3 +; V7A-NEXT: orr r0, r0, r7, lsl r1 +; V7A-NEXT: lsr r1, r7, r3 +; V7A-NEXT: lsrpl r0, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d2_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r7, lr} +; V7A-T-NEXT: push {r7, lr} +; V7A-T-NEXT: rsb.w r3, r2, #64 +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: rsbs.w r2, r2, #32 +; V7A-T-NEXT: lsl.w r12, r1, r3 +; V7A-T-NEXT: lsr.w r1, r0, lr +; V7A-T-NEXT: orr.w r1, r1, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r1, r0, r2 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r12, r1, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r1, r2 +; V7A-T-NEXT: lsr.w r1, r1, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r7, pc} +; +; V6M-LABEL: bzhi64_d2_load: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r1, #64 +; V6M-NEXT: subs r4, r1, r2 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i64 64, %numlowbits + %highbitscleared = shl i64 %val, %numhighbits + %masked = lshr i64 %highbitscleared, %numhighbits + ret i64 %masked +} + +define i64 @bzhi64_d3_load_indexzext(ptr %w, i8 %numlowbits) nounwind { +; V7M-LABEL: bzhi64_d3_load_indexzext: +; V7M: @ %bb.0: +; V7M-NEXT: rsb.w r1, r1, #64 +; V7M-NEXT: ldrd r0, r2, [r0] +; V7M-NEXT: uxtb r1, r1 +; V7M-NEXT: rsb.w r3, r1, #32 +; V7M-NEXT: lsl.w r12, r2, r1 +; V7M-NEXT: lsr.w r2, r0, r3 +; V7M-NEXT: orr.w r2, r2, r12 +; V7M-NEXT: subs.w r12, r1, #32 +; V7M-NEXT: it pl +; V7M-NEXT: lslpl.w r2, r0, r12 +; V7M-NEXT: lsl.w r0, r0, r1 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r0, #0 +; V7M-NEXT: lsl.w r3, r2, r3 +; V7M-NEXT: lsr.w r0, r0, r1 +; V7M-NEXT: lsr.w r1, r2, r1 +; V7M-NEXT: orr.w r0, r0, r3 +; V7M-NEXT: it pl +; V7M-NEXT: lsrpl.w r0, r2, r12 +; V7M-NEXT: it pl +; V7M-NEXT: movpl r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_d3_load_indexzext: +; V7A: @ %bb.0: +; V7A-NEXT: .save {r5, r7, r11, lr} +; V7A-NEXT: push {r5, r7, r11, lr} +; V7A-NEXT: rsb r1, r1, #64 +; V7A-NEXT: ldm r0, {r0, r7} +; V7A-NEXT: uxtb r2, r1 +; V7A-NEXT: rsb r3, r2, #32 +; V7A-NEXT: lsr r5, r0, r3 +; V7A-NEXT: orr r7, r5, r7, lsl r2 +; V7A-NEXT: mvn r5, #31 +; V7A-NEXT: uxtab r1, r5, r1 +; V7A-NEXT: cmp r1, #0 +; V7A-NEXT: lslpl r7, r0, r1 +; V7A-NEXT: lsl r0, r0, r2 +; V7A-NEXT: movwpl r0, #0 +; V7A-NEXT: lsr r0, r0, r2 +; V7A-NEXT: orr r0, r0, r7, lsl r3 +; V7A-NEXT: lsrpl r0, r7, r1 +; V7A-NEXT: lsr r1, r7, r2 +; V7A-NEXT: movwpl r1, #0 +; V7A-NEXT: pop {r5, r7, r11, pc} +; +; V7A-T-LABEL: bzhi64_d3_load_indexzext: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: .save {r4, lr} +; V7A-T-NEXT: push {r4, lr} +; V7A-T-NEXT: rsb.w r4, r1, #64 +; V7A-T-NEXT: ldrd r0, r2, [r0] +; V7A-T-NEXT: mvn r1, #31 +; V7A-T-NEXT: uxtb r3, r4 +; V7A-T-NEXT: rsb.w lr, r3, #32 +; V7A-T-NEXT: lsl.w r12, r2, r3 +; V7A-T-NEXT: uxtab r1, r1, r4 +; V7A-T-NEXT: lsr.w r2, r0, lr +; V7A-T-NEXT: 
cmp r1, #0 +; V7A-T-NEXT: orr.w r2, r2, r12 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lslpl.w r2, r0, r1 +; V7A-T-NEXT: lsl.w r0, r0, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r0, #0 +; V7A-T-NEXT: lsl.w r4, r2, lr +; V7A-T-NEXT: lsr.w r0, r0, r3 +; V7A-T-NEXT: orr.w r0, r0, r4 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: lsrpl.w r0, r2, r1 +; V7A-T-NEXT: lsr.w r1, r2, r3 +; V7A-T-NEXT: it pl +; V7A-T-NEXT: movpl r1, #0 +; V7A-T-NEXT: pop {r4, pc} +; +; V6M-LABEL: bzhi64_d3_load_indexzext: +; V6M: @ %bb.0: +; V6M-NEXT: .save {r4, lr} +; V6M-NEXT: push {r4, lr} +; V6M-NEXT: movs r2, #64 +; V6M-NEXT: subs r1, r2, r1 +; V6M-NEXT: uxtb r4, r1 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsl +; V6M-NEXT: mov r2, r4 +; V6M-NEXT: bl __aeabi_llsr +; V6M-NEXT: pop {r4, pc} + %val = load i64, ptr %w + %numhighbits = sub i8 64, %numlowbits + %sh_prom = zext i8 %numhighbits to i64 + %highbitscleared = shl i64 %val, %sh_prom + %masked = lshr i64 %highbitscleared, %sh_prom + ret i64 %masked +} + +; ---------------------------------------------------------------------------- ; +; Constant mask +; ---------------------------------------------------------------------------- ; + +; 32-bit + +define i32 @bzhi32_constant_mask32(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %masked = and i32 %val, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask32_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 2147483647 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI41_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI41_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i32 %val, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask16_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; 
V7A-LABEL: bzhi32_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI42_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI42_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i32, ptr %val + %masked = and i32 %val1, 32767 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8(i32 %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %masked = and i32 %val, 127 + ret i32 %masked +} + +define i32 @bzhi32_constant_mask8_load(ptr %val) nounwind { +; V7M-LABEL: bzhi32_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi32_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi32_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi32_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: bx lr + %val1 = load i32, ptr %val + %masked = and i32 %val1, 127 + ret i32 %masked +} + +; 64-bit + +define i64 @bzhi64_constant_mask64(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask64: +; V7M: @ %bb.0: +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64: +; V7A: @ %bb.0: +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64: +; V6M: @ %bb.0: +; V6M-NEXT: movs r2, #3 +; V6M-NEXT: lsls r2, r2, #30 +; V6M-NEXT: bics r1, r2 +; V6M-NEXT: bx lr + %masked = and i64 %val, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask64_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask64_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldrd r0, r1, [r0] +; V7M-NEXT: bic r1, r1, #-1073741824 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask64_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldrd r0, r1, [r0] +; V7A-NEXT: bic r1, r1, #-1073741824 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask64_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldrd r0, r1, [r0] +; V7A-T-NEXT: bic r1, r1, #-1073741824 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask64_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #3 +; V6M-NEXT: lsls r3, r1, #30 +; V6M-NEXT: ldr r2, [r0] +; V6M-NEXT: ldr r1, [r0, #4] +; V6M-NEXT: bics r1, r3 +; V6M-NEXT: mov r0, r2 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 4611686018427387903 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask32: +; V7M: @ %bb.0: +; V7M-NEXT: bic r0, 
r0, #-2147483648 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32: +; V7A: @ %bb.0: +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask32_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask32_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bic r0, r0, #-2147483648 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask32_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bic r0, r0, #-2147483648 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask32_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bic r0, r0, #-2147483648 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask32_load: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #1 +; V6M-NEXT: lsls r1, r1, #31 +; V6M-NEXT: ldr r0, [r0] +; V6M-NEXT: bics r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 2147483647 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask16: +; V7M: @ %bb.0: +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16: +; V7A: @ %bb.0: +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, .LCPI49_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI49_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %masked = and i64 %val, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask16_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask16_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bfc r0, #15, #17 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask16_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bfc r0, #15, #17 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask16_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bfc r0, #15, #17 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask16_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: ldr r0, .LCPI50_0 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr +; V6M-NEXT: .p2align 2 +; V6M-NEXT: @ %bb.1: +; V6M-NEXT: .LCPI50_0: +; V6M-NEXT: .long 32767 @ 0x7fff + %val1 = load i64, ptr %val + %masked = and i64 %val1, 32767 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8(i64 %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask8: +; V7M: @ %bb.0: +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8: +; V7A: @ %bb.0: +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8: +; 
V7A-T: @ %bb.0: +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8: +; V6M: @ %bb.0: +; V6M-NEXT: movs r1, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %masked = and i64 %val, 127 + ret i64 %masked +} + +define i64 @bzhi64_constant_mask8_load(ptr %val) nounwind { +; V7M-LABEL: bzhi64_constant_mask8_load: +; V7M: @ %bb.0: +; V7M-NEXT: ldr r0, [r0] +; V7M-NEXT: movs r1, #0 +; V7M-NEXT: and r0, r0, #127 +; V7M-NEXT: bx lr +; +; V7A-LABEL: bzhi64_constant_mask8_load: +; V7A: @ %bb.0: +; V7A-NEXT: ldr r0, [r0] +; V7A-NEXT: mov r1, #0 +; V7A-NEXT: and r0, r0, #127 +; V7A-NEXT: bx lr +; +; V7A-T-LABEL: bzhi64_constant_mask8_load: +; V7A-T: @ %bb.0: +; V7A-T-NEXT: ldr r0, [r0] +; V7A-T-NEXT: movs r1, #0 +; V7A-T-NEXT: and r0, r0, #127 +; V7A-T-NEXT: bx lr +; +; V6M-LABEL: bzhi64_constant_mask8_load: +; V6M: @ %bb.0: +; V6M-NEXT: ldr r1, [r0] +; V6M-NEXT: movs r0, #127 +; V6M-NEXT: ands r0, r1 +; V6M-NEXT: movs r1, #0 +; V6M-NEXT: bx lr + %val1 = load i64, ptr %val + %masked = and i64 %val1, 127 + ret i64 %masked +} diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll index 749ee00..a1a04db 100644 --- a/llvm/test/CodeGen/ARM/llrint-conv.ll +++ b/llvm/test/CodeGen/ARM/llrint-conv.ll @@ -1,46 +1,71 @@ -; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP -; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 -; SOFTFP-LABEL: testmsxh_builtin: -; SOFTFP: bl llrintf -; HARDFP-LABEL: testmsxh_builtin: -; HARDFP: bl llrintf define i64 @testmsxh_builtin(half %x) { +; CHECK-SOFT-LABEL: testmsxh_builtin: +; CHECK-SOFT: @ %bb.0: @ %entry +; CHECK-SOFT-NEXT: .save {r11, lr} +; CHECK-SOFT-NEXT: push {r11, lr} +; CHECK-SOFT-NEXT: bl __aeabi_h2f +; CHECK-SOFT-NEXT: bl llrintf +; CHECK-SOFT-NEXT: pop {r11, pc} +; +; CHECK-NOFP16-LABEL: testmsxh_builtin: +; CHECK-NOFP16: @ %bb.0: @ %entry +; CHECK-NOFP16-NEXT: .save {r11, lr} +; CHECK-NOFP16-NEXT: push {r11, lr} +; CHECK-NOFP16-NEXT: vmov r0, s0 +; CHECK-NOFP16-NEXT: bl __aeabi_h2f +; CHECK-NOFP16-NEXT: vmov s0, r0 +; CHECK-NOFP16-NEXT: bl llrintf +; CHECK-NOFP16-NEXT: pop {r11, pc} +; +; CHECK-FP16-LABEL: testmsxh_builtin: +; CHECK-FP16: @ %bb.0: @ %entry +; CHECK-FP16-NEXT: .save {r11, lr} +; CHECK-FP16-NEXT: push {r11, lr} +; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0 +; CHECK-FP16-NEXT: bl llrintf +; CHECK-FP16-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f16(half %x) ret i64 %0 } -; SOFTFP-LABEL: testmsxs_builtin: -; SOFTFP: bl llrintf -; HARDFP-LABEL: testmsxs_builtin: -; HARDFP: bl llrintf define i64 @testmsxs_builtin(float %x) { +; CHECK-LABEL: testmsxs_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrintf +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f32(float %x) ret i64 %0 } -; SOFTFP-LABEL: testmsxd_builtin: -; SOFTFP: bl llrint -; HARDFP-LABEL: testmsxd_builtin: -; HARDFP: bl llrint define 
i64 @testmsxd_builtin(double %x) { +; CHECK-LABEL: testmsxd_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrint +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f64(double %x) ret i64 %0 } -; FIXME(#44744): incorrect libcall -; SOFTFP-LABEL: testmsxq_builtin: -; SOFTFP: bl llrintl -; HARDFP-LABEL: testmsxq_builtin: -; HARDFP: bl llrintl define i64 @testmsxq_builtin(fp128 %x) { +; CHECK-LABEL: testmsxq_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl llrintl +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x) ret i64 %0 } - -declare i64 @llvm.llrint.i64.f32(float) nounwind readnone -declare i64 @llvm.llrint.i64.f64(double) nounwind readnone diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll index 9aa9511..23a2685 100644 --- a/llvm/test/CodeGen/ARM/lrint-conv.ll +++ b/llvm/test/CodeGen/ARM/lrint-conv.ll @@ -1,5 +1,7 @@ -; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP -; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16 ; FIXME: crash ; define i32 @testmswh_builtin(half %x) { @@ -8,36 +10,37 @@ ; ret i32 %0 ; } -; SOFTFP-LABEL: testmsws_builtin: -; SOFTFP: bl lrintf -; HARDFP-LABEL: testmsws_builtin: -; HARDFP: bl lrintf define i32 @testmsws_builtin(float %x) { +; CHECK-LABEL: testmsws_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: b lrintf entry: %0 = tail call i32 @llvm.lrint.i32.f32(float %x) ret i32 %0 } -; SOFTFP-LABEL: testmswd_builtin: -; SOFTFP: bl lrint -; HARDFP-LABEL: testmswd_builtin: -; HARDFP: bl lrint define i32 @testmswd_builtin(double %x) { +; CHECK-LABEL: testmswd_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: b lrint entry: %0 = tail call i32 @llvm.lrint.i32.f64(double %x) ret i32 %0 } -; FIXME(#44744): incorrect libcall -; SOFTFP-LABEL: testmswq_builtin: -; SOFTFP: bl lrintl -; HARDFP-LABEL: testmswq_builtin: -; HARDFP: bl lrintl define i32 @testmswq_builtin(fp128 %x) { +; CHECK-LABEL: testmswq_builtin: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r11, lr} +; CHECK-NEXT: push {r11, lr} +; CHECK-NEXT: bl lrintl +; CHECK-NEXT: pop {r11, pc} entry: %0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x) ret i32 %0 } -declare i32 @llvm.lrint.i32.f32(float) nounwind readnone -declare i32 @llvm.lrint.i32.f64(double) nounwind readnone +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CHECK-FP16: {{.*}} +; CHECK-NOFP16: {{.*}} +; CHECK-SOFT: {{.*}} diff --git a/llvm/test/CodeGen/ARM/vector-lrint.ll b/llvm/test/CodeGen/ARM/vector-lrint.ll index fe5e3cb..c1159da 100644 --- a/llvm/test/CodeGen/ARM/vector-lrint.ll +++ b/llvm/test/CodeGen/ARM/vector-lrint.ll @@ -14,31 +14,26 @@ ; %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x) ; ret <1 x iXLen> %a ; } -; declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>) ; define <2 x iXLen> @lrint_v2f16(<2 x half> %x) { ; %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x) ; ret <2 x iXLen> %a ; } -; declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>) ; define <4 x iXLen> @lrint_v4f16(<4 x half> %x) { ; %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x) ; ret <4 x iXLen> %a ; } -; declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>) ; define <8 x iXLen> @lrint_v8f16(<8 x half> %x) { ; %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x) ; ret <8 x iXLen> %a ; } -; declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>) ; define <16 x iXLen> @lrint_v16f16(<16 x half> %x) { ; %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x) ; ret <16 x iXLen> %a ; } -; declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>) define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { ; LE-I32-LABEL: lrint_v1f32: @@ -76,7 +71,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>) define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { ; LE-I32-LABEL: lrint_v2f32: @@ -160,7 +154,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>) define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { ; LE-I32-LABEL: lrint_v4f32: @@ -274,7 +267,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>) define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { ; LE-I32-LABEL: lrint_v8f32: @@ -488,7 +480,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>) define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { ; LE-I32-LABEL: lrint_v16f32: @@ -1005,7 +996,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>) define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { ; LE-I32-LABEL: lrint_v1f64: @@ -1043,7 +1033,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>) define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { ; LE-I32-LABEL: lrint_v2f64: @@ -1120,7 +1109,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>) define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { ; LE-I32-LABEL: lrint_v4f64: @@ -1237,7 +1225,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) { %a = call <4 x 
iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>) define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { ; LE-I32-LABEL: lrint_v8f64: @@ -1467,7 +1454,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>) define <16 x iXLen> @lrint_v16f64(<16 x double> %x) { ; LE-I32-LABEL: lrint_v16f64: @@ -2053,7 +2039,6 @@ define <16 x iXLen> @lrint_v16f64(<16 x double> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double>) define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) { ; LE-I32-LABEL: lrint_v1fp128: @@ -2091,7 +2076,6 @@ define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) { %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128> %x) ret <1 x iXLen> %a } -declare <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128>) define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) { ; LE-I32-LABEL: lrint_v2fp128: @@ -2194,7 +2178,6 @@ define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) { %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128> %x) ret <2 x iXLen> %a } -declare <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128>) define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) { ; LE-I32-LABEL: lrint_v4fp128: @@ -2347,7 +2330,6 @@ define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) { %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128> %x) ret <4 x iXLen> %a } -declare <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128>) define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) { ; LE-I32-LABEL: lrint_v8fp128: @@ -2664,7 +2646,6 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) { %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128> %x) ret <8 x iXLen> %a } -declare <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128>) define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) { ; LE-I32-LABEL: lrint_v16fp128: @@ -3262,4 +3243,3 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) { %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128> %x) ret <16 x iXLen> %a } -declare <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128>) diff --git a/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll new file mode 100644 index 0000000..93ca3a2 --- /dev/null +++ b/llvm/test/CodeGen/Hexagon/isel-inttofp-v32i1tov32f32.ll @@ -0,0 +1,42 @@ +; Tests lowering of v32i1 to v32f32 + +; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \ +; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s + +define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 { +; CHECK: name: uitofp_i1 +; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} +; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 +; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] +; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] +; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] +; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] +; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] +; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] +; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] +; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] + %q1 = icmp eq <32 x i16> %in0, %in1 + %fp0 = uitofp <32 x i1> %q1 to <32 x float> + %out = 
fadd <32 x float> %fp0, %fp0 + ret <32 x float> %out +} + +define <32 x float> @sitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 { +; CHECK: name: sitofp_i1 +; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} +; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 +; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] +; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] +; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] +; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] +; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] +; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] +; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] +; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] + %q1 = icmp eq <32 x i16> %in0, %in1 + %fp0 = sitofp <32 x i1> %q1 to <32 x float> + %out = fadd <32 x float> %fp0, %fp0 + ret <32 x float> %out +} + +attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" } diff --git a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll b/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll deleted file mode 100644 index dfb2bc83..0000000 --- a/llvm/test/CodeGen/Hexagon/isel-uinttofp-v32i1tov32f32.ll +++ /dev/null @@ -1,25 +0,0 @@ -; Tests lowering of v32i1 to v32f32 - -; RUN: llc -march=hexagon -mattr=+hvxv79,+hvx-length128b,+hvx-ieee-fp \ -; RUN: -stop-after=hexagon-isel %s -o - | FileCheck %s - -; CHECK: [[R0:%[0-9]+]]:hvxvr = V6_lvsplatw killed %{{[0-9]+}} -; CHECK-NEXT: [[R1:%[0-9]+]]:intregs = A2_tfrsi 1 -; CHECK-NEXT: [[R2:%[0-9]+]]:hvxvr = V6_lvsplatw [[R1]] -; CHECK-NEXT: [[R3:%[0-9]+]]:hvxqr = V6_vandvrt [[R2]], [[R1]] -; CHECK-NEXT: [[R4:%[0-9]+]]:hvxvr = V6_vprefixqw killed [[R3]] -; CHECK-NEXT: [[R5:%[0-9]+]]:hvxvr = V6_vsubw killed [[R4]], [[R2]] -; CHECK-NEXT: [[R6:%[0-9]+]]:hvxvr = V6_vlsrwv killed [[R0]], killed [[R5]] -; CHECK-NEXT: [[R7:%[0-9]+]]:hvxvr = V6_vand killed [[R6]], [[R2]] -; CHECK-NEXT: [[R8:%[0-9]+]]:hvxvr = V6_vconv_sf_w killed [[R7]] -; CHECK-NEXT: hvxvr = V6_vadd_sf_sf [[R8]], [[R8]] - -define <32 x float> @uitofp_i1(<32 x i16> %in0, <32 x i16> %in1) #0 -{ - %q1 = icmp eq <32 x i16> %in0, %in1 - %fp0 = uitofp <32 x i1> %q1 to <32 x float> - %out = fadd <32 x float> %fp0, %fp0 - ret <32 x float> %out -} - -attributes #0 = { nounwind readnone "target-cpu"="hexagonv79" "target-features"="+hvxv79,+hvx-length128b" } diff --git a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll index 18fb879..21ca041 100644 --- a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll +++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll @@ -115,5 +115,150 @@ define ptx_kernel void @inlineasm(ptr %p) { store <2 x float> %mul, ptr %p, align 8 ret void } + +define ptx_kernel void @trunc_v2i32(<2 x i32> %0) { +; CHECK-SM90A-LABEL: trunc_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<7>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r1, %r2}, [trunc_v2i32_param_0]; +; CHECK-SM90A-NEXT: prmt.b32 %r3, %r1, %r2, 0x3340U; +; CHECK-SM90A-NEXT: mov.b32 %r4, 0; +; CHECK-SM90A-NEXT: prmt.b32 %r5, %r4, 0, 0x3340U; +; CHECK-SM90A-NEXT: prmt.b32 %r6, %r5, %r3, 0x5410U; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 0; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r6; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: trunc_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<7>; +; CHECK-SM100-NEXT: .reg 
.b64 %rd<3>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [trunc_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-SM100-NEXT: mov.b32 %r3, 0; +; CHECK-SM100-NEXT: prmt.b32 %r4, %r3, 0, 0x3340U; +; CHECK-SM100-NEXT: prmt.b32 %r5, %r1, %r2, 0x3340U; +; CHECK-SM100-NEXT: prmt.b32 %r6, %r4, %r5, 0x5410U; +; CHECK-SM100-NEXT: mov.b64 %rd2, 0; +; CHECK-SM100-NEXT: st.b32 [%rd2], %r6; +; CHECK-SM100-NEXT: ret; + %2 = trunc <2 x i32> %0 to <2 x i8> + %3 = shufflevector <2 x i8> zeroinitializer, <2 x i8> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i8> %3, ptr null, align 4 + ret void +} + +define ptx_kernel void @zextend_to_v2i32(<2 x i8> %0) { +; CHECK-SM90A-LABEL: zextend_to_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b16 %rs<3>; +; CHECK-SM90A-NEXT: .reg .b32 %r<4>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<5>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r3, %rs2; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 12; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r3; +; CHECK-SM90A-NEXT: mov.b64 %rd2, 8; +; CHECK-SM90A-NEXT: st.b32 [%rd2], %r2; +; CHECK-SM90A-NEXT: mov.b64 %rd3, 4; +; CHECK-SM90A-NEXT: st.b32 [%rd3], 0; +; CHECK-SM90A-NEXT: mov.b64 %rd4, 0; +; CHECK-SM90A-NEXT: st.b32 [%rd4], 0; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: zextend_to_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b16 %rs<3>; +; CHECK-SM100-NEXT: .reg .b32 %r<5>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r3, %rs1; +; CHECK-SM100-NEXT: mov.b64 %rd1, {%r3, %r2}; +; CHECK-SM100-NEXT: mov.b32 %r4, 0; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r4, %r4}; +; CHECK-SM100-NEXT: mov.b64 %rd3, 4; +; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd4, 0; +; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd5, 8; +; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1; +; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32; +; CHECK-SM100-NEXT: mov.b64 %rd7, 12; +; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6; +; CHECK-SM100-NEXT: ret; + %2 = zext <2 x i8> %0 to <2 x i32> + %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i32> %3, ptr null, align 4 + ret void +} + +define ptx_kernel void @sextend_to_v2i32(<2 x i8> %0) { +; CHECK-SM90A-LABEL: sextend_to_v2i32( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b16 %rs<3>; +; CHECK-SM90A-NEXT: .reg .b32 %r<6>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<5>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1; +; CHECK-SM90A-NEXT: cvt.s32.s8 %r3, %r2; +; CHECK-SM90A-NEXT: cvt.u32.u16 %r4, %rs2; +; CHECK-SM90A-NEXT: cvt.s32.s8 %r5, %r4; +; CHECK-SM90A-NEXT: mov.b64 %rd1, 12; +; CHECK-SM90A-NEXT: st.b32 [%rd1], %r5; +; CHECK-SM90A-NEXT: mov.b64 %rd2, 8; +; CHECK-SM90A-NEXT: st.b32 [%rd2], %r3; +; CHECK-SM90A-NEXT: mov.b64 %rd3, 4; +; CHECK-SM90A-NEXT: st.b32 [%rd3], 0; +; CHECK-SM90A-NEXT: mov.b64 %rd4, 0; +; CHECK-SM90A-NEXT: st.b32 
[%rd4], 0; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: sextend_to_v2i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b16 %rs<3>; +; CHECK-SM100-NEXT: .reg .b32 %r<7>; +; CHECK-SM100-NEXT: .reg .b64 %rd<8>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2}; +; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-SM100-NEXT: cvt.s32.s8 %r3, %r2; +; CHECK-SM100-NEXT: cvt.u32.u16 %r4, %rs1; +; CHECK-SM100-NEXT: cvt.s32.s8 %r5, %r4; +; CHECK-SM100-NEXT: mov.b64 %rd1, {%r5, %r3}; +; CHECK-SM100-NEXT: mov.b32 %r6, 0; +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r6, %r6}; +; CHECK-SM100-NEXT: mov.b64 %rd3, 4; +; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd4, 0; +; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2; +; CHECK-SM100-NEXT: mov.b64 %rd5, 8; +; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1; +; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32; +; CHECK-SM100-NEXT: mov.b64 %rd7, 12; +; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6; +; CHECK-SM100-NEXT: ret; + %2 = sext <2 x i8> %0 to <2 x i32> + %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + store <4 x i32> %3, ptr null, align 4 + ret void +} ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll index 1469d49..4b0acda 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-intrinsics.ll @@ -1420,3 +1420,61 @@ define double @tanh_f64(double %a) nounwind { %1 = call double @llvm.tanh.f64(double %a) ret double %1 } + +define { double, double } @test_modf_f64(double %a) nounwind { +; RV32IFD-LABEL: test_modf_f64: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: mv a0, sp +; RV32IFD-NEXT: call modf +; RV32IFD-NEXT: fld fa1, 0(sp) +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call modf +; RV64IFD-NEXT: fld fa1, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f64: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: mv a2, sp +; RV32I-NEXT: call modf +; RV32I-NEXT: lw a2, 0(sp) +; RV32I-NEXT: lw a3, 4(sp) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f64: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv a1, sp +; RV64I-NEXT: call modf +; RV64I-NEXT: ld a1, 0(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { double, double } @llvm.modf.f64(double %a) + 
ret { double, double } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll index 23f660b..01d9ceb 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll @@ -2118,3 +2118,62 @@ define float @tanh_f32(float %a) nounwind { %1 = call float @llvm.tanh.f32(float %a) ret float %1 } + +define { float, float } @test_modf_f32(float %a) nounwind { +; RV32IF-LABEL: test_modf_f32: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: addi a0, sp, 8 +; RV32IF-NEXT: call modff +; RV32IF-NEXT: flw fa1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: test_modf_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: call modff +; RV64IF-NEXT: flw fa1, 4(sp) +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: addi a0, sp, 4 +; RV64IFD-NEXT: call modff +; RV64IFD-NEXT: flw fa1, 4(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f32: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: call modff +; RV32I-NEXT: lw a1, 8(sp) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f32: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi a1, sp, 4 +; RV64I-NEXT: call modff +; RV64I-NEXT: lw a1, 4(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { float, float } @llvm.modf.f32(float %a) + ret { float, float } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll index ae9b6cc..e7a3f79 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/fp128.ll @@ -911,3 +911,28 @@ define fp128 @tanh(fp128 %a) nounwind { %1 = call fp128 @llvm.tanh.f128(fp128 %a) ret fp128 %1 } + +define { fp128, fp128 } @modf(fp128 %a) nounwind { +; CHECK-LABEL: modf: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -32 +; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; CHECK-NEXT: mv s0, a0 +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: mv a1, a2 +; CHECK-NEXT: mv a2, sp +; CHECK-NEXT: call modfl +; CHECK-NEXT: ld a2, 0(sp) +; CHECK-NEXT: ld a3, 8(sp) +; CHECK-NEXT: sd a0, 0(s0) +; CHECK-NEXT: sd a1, 8(s0) +; CHECK-NEXT: sd a2, 16(s0) +; CHECK-NEXT: sd a3, 24(s0) +; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; CHECK-NEXT: addi sp, sp, 32 +; CHECK-NEXT: ret + %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a) + ret { fp128, fp128 } %result +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir index f1d17f9f..1361d92 100644 --- 
a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir @@ -506,8 +506,9 @@ # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices -# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. the first uncovered type index: 1, OK +# DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices # DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} # DEBUG-NEXT: .. the first uncovered type index: 1, OK diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll index caeb6e6..aaa08b5 100644 --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -2109,3 +2109,85 @@ define double @tanh_f64(double %a) nounwind { %1 = call double @llvm.tanh.f64(double %a) ret double %1 } + +define { double, double } @test_modf_f64(double %a) nounwind { +; RV32IFD-LABEL: test_modf_f64: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: mv a0, sp +; RV32IFD-NEXT: call modf +; RV32IFD-NEXT: fld fa1, 0(sp) +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f64: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call modf +; RV64IFD-NEXT: fld fa1, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: test_modf_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: mv a2, sp +; RV32IZFINXZDINX-NEXT: call modf +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; +; RV64IZFINXZDINX-LABEL: test_modf_f64: +; RV64IZFINXZDINX: # %bb.0: +; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFINXZDINX-NEXT: mv a1, sp +; RV64IZFINXZDINX-NEXT: call modf +; RV64IZFINXZDINX-NEXT: ld a1, 0(sp) +; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV64IZFINXZDINX-NEXT: ret +; +; RV32I-LABEL: test_modf_f64: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv a3, a2 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv a2, sp +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, a3 +; RV32I-NEXT: call modf +; RV32I-NEXT: lw a2, 0(sp) +; RV32I-NEXT: lw a3, 4(sp) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f64: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; 
RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv a1, sp +; RV64I-NEXT: call modf +; RV64I-NEXT: ld a1, 0(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { double, double } @llvm.modf.f64(double %a) + ret { double, double } %result +} diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index b1230ae..5f673ac 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -3050,3 +3050,84 @@ define float @tanh_f32(float %a) nounwind { %1 = call float @llvm.tanh.f32(float %a) ret float %1 } + +define { float, float } @test_modf_f32(float %a) nounwind { +; RV32IF-LABEL: test_modf_f32: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: addi a0, sp, 8 +; RV32IF-NEXT: call modff +; RV32IF-NEXT: flw fa1, 8(sp) +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV32IZFINX-LABEL: test_modf_f32: +; RV32IZFINX: # %bb.0: +; RV32IZFINX-NEXT: addi sp, sp, -16 +; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINX-NEXT: addi a1, sp, 8 +; RV32IZFINX-NEXT: call modff +; RV32IZFINX-NEXT: lw a1, 8(sp) +; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINX-NEXT: addi sp, sp, 16 +; RV32IZFINX-NEXT: ret +; +; RV64IF-LABEL: test_modf_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: call modff +; RV64IF-NEXT: flw fa1, 4(sp) +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IZFINX-LABEL: test_modf_f32: +; RV64IZFINX: # %bb.0: +; RV64IZFINX-NEXT: addi sp, sp, -16 +; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFINX-NEXT: addi a1, sp, 4 +; RV64IZFINX-NEXT: call modff +; RV64IZFINX-NEXT: lw a1, 4(sp) +; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFINX-NEXT: addi sp, sp, 16 +; RV64IZFINX-NEXT: ret +; +; RV64IFD-LABEL: test_modf_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: addi a0, sp, 4 +; RV64IFD-NEXT: call modff +; RV64IFD-NEXT: flw fa1, 4(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: test_modf_f32: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: call modff +; RV32I-NEXT: lw a1, 8(sp) +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f32: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: addi a1, sp, 4 +; RV64I-NEXT: call modff +; RV64I-NEXT: lw a1, 4(sp) +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %result = call { float, float } @llvm.modf.f32(float %a) + ret { float, float } %result +} diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll index a8e26f7..704cb425 100644 --- a/llvm/test/CodeGen/RISCV/fp128.ll +++ b/llvm/test/CodeGen/RISCV/fp128.ll @@ -189,3 +189,46 @@ define fp128 @fminimum(fp128 %x, fp128 %y) { %a = call fp128 @llvm.minimum.fp128(fp128 %x, fp128 %y) ret fp128 %a } + 
+define { fp128, fp128 } @modf(fp128 %a) nounwind { +; RV32I-LABEL: modf: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -64 +; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 56(sp) # 4-byte Folded Spill +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a5, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: addi a0, sp, 24 +; RV32I-NEXT: addi a1, sp, 8 +; RV32I-NEXT: addi a2, sp, 40 +; RV32I-NEXT: sw a3, 8(sp) +; RV32I-NEXT: sw a4, 12(sp) +; RV32I-NEXT: sw a5, 16(sp) +; RV32I-NEXT: sw a6, 20(sp) +; RV32I-NEXT: call modfl +; RV32I-NEXT: lw a0, 24(sp) +; RV32I-NEXT: lw a1, 28(sp) +; RV32I-NEXT: lw a2, 32(sp) +; RV32I-NEXT: lw a3, 36(sp) +; RV32I-NEXT: lw a4, 40(sp) +; RV32I-NEXT: lw a5, 44(sp) +; RV32I-NEXT: lw a6, 48(sp) +; RV32I-NEXT: lw a7, 52(sp) +; RV32I-NEXT: sw a4, 16(s0) +; RV32I-NEXT: sw a5, 20(s0) +; RV32I-NEXT: sw a6, 24(s0) +; RV32I-NEXT: sw a7, 28(s0) +; RV32I-NEXT: sw a0, 0(s0) +; RV32I-NEXT: sw a1, 4(s0) +; RV32I-NEXT: sw a2, 8(s0) +; RV32I-NEXT: sw a3, 12(s0) +; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 64 +; RV32I-NEXT: ret + %result = call { fp128, fp128 } @llvm.modf.f128(fp128 %a) + ret { fp128, fp128 } %result +} diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll index e16d788..847054d 100644 --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -4417,3 +4417,161 @@ define half @tanh_f16(half %a) nounwind { %1 = call half @llvm.tanh.f16(half %a) ret half %1 } + +define { half, half } @test_modf_f16(half %a) nounwind { +; RV32IZFH-LABEL: test_modf_f16: +; RV32IZFH: # %bb.0: +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFH-NEXT: fcvt.s.h fa0, fa0 +; RV32IZFH-NEXT: addi a0, sp, 8 +; RV32IZFH-NEXT: call modff +; RV32IZFH-NEXT: flw fa5, 8(sp) +; RV32IZFH-NEXT: fcvt.h.s fa0, fa0 +; RV32IZFH-NEXT: fcvt.h.s fa1, fa5 +; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFH-NEXT: addi sp, sp, 16 +; RV32IZFH-NEXT: ret +; +; RV64IZFH-LABEL: test_modf_f16: +; RV64IZFH: # %bb.0: +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFH-NEXT: fcvt.s.h fa0, fa0 +; RV64IZFH-NEXT: addi a0, sp, 4 +; RV64IZFH-NEXT: call modff +; RV64IZFH-NEXT: flw fa5, 4(sp) +; RV64IZFH-NEXT: fcvt.h.s fa0, fa0 +; RV64IZFH-NEXT: fcvt.h.s fa1, fa5 +; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFH-NEXT: addi sp, sp, 16 +; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: test_modf_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: addi a1, sp, 8 +; RV32IZHINX-NEXT: call modff +; RV32IZHINX-NEXT: lw a1, 8(sp) +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: fcvt.h.s a1, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_modf_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: addi a1, sp, 4 +; RV64IZHINX-NEXT: call modff +; RV64IZHINX-NEXT: lw a1, 4(sp) +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: fcvt.h.s a1, a1 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: 
addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32I-LABEL: test_modf_f16: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srli a0, a0, 16 +; RV32I-NEXT: call __extendhfsf2 +; RV32I-NEXT: addi a1, sp, 4 +; RV32I-NEXT: call modff +; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: lw a0, 4(sp) +; RV32I-NEXT: call __truncsfhf2 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: test_modf_f16: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: slli a0, a0, 48 +; RV64I-NEXT: srli a0, a0, 48 +; RV64I-NEXT: call __extendhfsf2 +; RV64I-NEXT: addi a1, sp, 12 +; RV64I-NEXT: call modff +; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: mv s0, a0 +; RV64I-NEXT: lw a0, 12(sp) +; RV64I-NEXT: call __truncsfhf2 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret +; +; RV32IZFHMIN-LABEL: test_modf_f16: +; RV32IZFHMIN: # %bb.0: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0 +; RV32IZFHMIN-NEXT: addi a0, sp, 8 +; RV32IZFHMIN-NEXT: call modff +; RV32IZFHMIN-NEXT: flw fa5, 8(sp) +; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0 +; RV32IZFHMIN-NEXT: fcvt.h.s fa1, fa5 +; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFHMIN-NEXT: addi sp, sp, 16 +; RV32IZFHMIN-NEXT: ret +; +; RV64IZFHMIN-LABEL: test_modf_f16: +; RV64IZFHMIN: # %bb.0: +; RV64IZFHMIN-NEXT: addi sp, sp, -16 +; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0 +; RV64IZFHMIN-NEXT: addi a0, sp, 4 +; RV64IZFHMIN-NEXT: call modff +; RV64IZFHMIN-NEXT: flw fa5, 4(sp) +; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0 +; RV64IZFHMIN-NEXT: fcvt.h.s fa1, fa5 +; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZFHMIN-NEXT: addi sp, sp, 16 +; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_modf_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: addi a1, sp, 8 +; RV32IZHINXMIN-NEXT: call modff +; RV32IZHINXMIN-NEXT: lw a1, 8(sp) +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_modf_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: addi a1, sp, 4 +; RV64IZHINXMIN-NEXT: call modff +; RV64IZHINXMIN-NEXT: lw a1, 4(sp) +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret + %result = call { half, half } @llvm.modf.f16(half %a) + ret { half, half } %result +} diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll 
b/llvm/test/CodeGen/RISCV/rvv/remat.ll index 06d54fa..95bff27 100644 --- a/llvm/test/CodeGen/RISCV/rvv/remat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll @@ -301,3 +301,135 @@ define void @vfmv.s.f(ptr %p, double %x) { store volatile double %x, ptr %p ret void } + +; This test is fairly fragile, but it's trying to cover the case that +; caused the revert of bba9172 due to an interaction with how rematerialized +; instructions are pruned from the original live interval. In the result +; below, we remat the vmv.v.x into the loop, but fail to remat the vmv.v.x +; a second time after further splitting its live range. We shouldn't need +; to spill it to the stack at all. +define i64 @dual_remat(i64 %0, <vscale x 16 x i64> %1, <vscale x 16 x i64> %2, ptr %p) #0 { +; CHECK-LABEL: dual_remat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a2, a1, 5 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill +; CHECK-NEXT: addi a1, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma +; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a1, a2, 3 +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a2, a3, a2 +; CHECK-NEXT: vmv.v.i v0, 0 +; CHECK-NEXT: .LBB8_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a5, a4, 4 +; CHECK-NEXT: add a4, a5, a4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vand.vv v16, v16, v8 +; CHECK-NEXT: vmsne.vi v24, v16, 0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vs1r.v v24, (a4) # vscale x 8-byte Folded Spill +; CHECK-NEXT: vand.vv v16, v0, v8 +; CHECK-NEXT: vmsne.vi v8, v16, 0 +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: mv a5, a4 +; CHECK-NEXT: slli a4, a4, 3 +; CHECK-NEXT: add a5, a5, a4 +; CHECK-NEXT: slli a4, a4, 1 +; CHECK-NEXT: add a4, a4, a5 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload +; CHECK-NEXT: csrr a4, vlenb +; CHECK-NEXT: slli a4, a4, 4 +; CHECK-NEXT: add a4, sp, a4 +; CHECK-NEXT: addi a4, a4, 16 +; CHECK-NEXT: vl1r.v v9, (a4) # vscale x 8-byte Folded Reload +; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslideup.vx v9, v8, a1 +; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma +; CHECK-NEXT: 
vcpop.m a4, v9 +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: slli a6, a5, 4 +; CHECK-NEXT: add a5, a6, a5 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vs8r.v v8, (a3) +; CHECK-NEXT: vs8r.v v8, (a2) +; CHECK-NEXT: addi a5, sp, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; CHECK-NEXT: vor.vv v16, v16, v8 +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: slli a5, a5, 3 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload +; CHECK-NEXT: vor.vv v0, v0, v8 +; CHECK-NEXT: beqz a4, .LBB8_1 +; CHECK-NEXT: # %bb.2: # %middle.block +; CHECK-NEXT: andi a0, a0, 1 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a2, a1, 5 +; CHECK-NEXT: add a1, a2, a1 +; CHECK-NEXT: add sp, sp, a1 +; CHECK-NEXT: .cfi_def_cfa sp, 16 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: .cfi_def_cfa_offset 0 +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <vscale x 16 x i64> zeroinitializer, i64 %0, i64 0 + %broadcast.splat = shufflevector <vscale x 16 x i64> %broadcast.splatinsert, <vscale x 16 x i64> zeroinitializer, <vscale x 16 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %vec.ind = phi <vscale x 16 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ] + %3 = and <vscale x 16 x i64> %vec.ind, %broadcast.splat + %4 = icmp ne <vscale x 16 x i64> %3, zeroinitializer + store <vscale x 16 x i64> %broadcast.splat, ptr %p + %5 = tail call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %4) + %vec.ind.next = or <vscale x 16 x i64> %vec.ind, %1 + br i1 %5, label %middle.block, label %vector.body + +middle.block: ; preds = %vector.body + %and.i = and i64 1, %0 + ret i64 %and.i +} diff --git a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll index 9ccd4f1..767ef7e 100644 --- a/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll +++ b/llvm/test/CodeGen/SPARC/2011-01-19-DelaySlot.ll @@ -184,4 +184,29 @@ entry: ret i32 %2 } +define i32 @test_generic_inst(i32 %arg) #0 { +;CHECK-LABEL: test_generic_inst: +;CHECK: ! fake_use: {{.*}} +;CHECK: bne {{.*}} +;CHECK-NEXT: nop + %bar1 = call i32 @bar(i32 %arg) + %even = and i32 %bar1, 1 + %cmp = icmp eq i32 %even, 0 + ; This shouldn't get reordered into a delay slot + call void (...) @llvm.fake.use(i32 %arg) + br i1 %cmp, label %true, label %false +true: + %bar2 = call i32 @bar(i32 %bar1) + br label %cont + +false: + %inc = add nsw i32 %bar1, 1 + br label %cont + +cont: + %ret = phi i32 [ %bar2, %true ], [ %inc, %false ] + ret i32 %ret +} + +declare void @llvm.fake.use(...) 
attributes #0 = { nounwind "disable-tail-calls"="true" } diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll index 6ef7219..9cf7aab 100644 --- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll +++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll @@ -56,14 +56,9 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-LABEL: PR90954: ; CHECK: # %bb.0: ; CHECK-NEXT: pushq %rbp -; CHECK-NEXT: movq %rsp, %rbp -; CHECK-NEXT: pushq %r15 ; CHECK-NEXT: pushq %r14 -; CHECK-NEXT: pushq %r13 -; CHECK-NEXT: pushq %r12 ; CHECK-NEXT: pushq %rbx -; CHECK-NEXT: andq $-1024, %rsp # imm = 0xFC00 -; CHECK-NEXT: subq $5120, %rsp # imm = 0x1400 +; CHECK-NEXT: subq $2912, %rsp # imm = 0xB60 ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp) @@ -79,29 +74,26 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-NEXT: movw $64, %cx ; CHECK-NEXT: movw $16, %di ; CHECK-NEXT: movb $1, %r8b -; CHECK-NEXT: movl $64, %r9d -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r10 -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r11 -; CHECK-NEXT: xorl %ebx, %ebx -; CHECK-NEXT: xorl %r14d, %r14d +; CHECK-NEXT: xorl %r9d, %r9d +; CHECK-NEXT: xorl %r10d, %r10d ; CHECK-NEXT: jmp .LBB1_1 ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB1_5: # in Loop: Header=BB1_1 Depth=1 -; CHECK-NEXT: incq %r14 -; CHECK-NEXT: addl %edx, %ebx +; CHECK-NEXT: incq %r10 +; CHECK-NEXT: addl %edx, %r9d ; CHECK-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 ; CHECK-NEXT: # Child Loop BB1_2 Depth 2 -; CHECK-NEXT: movslq %ebx, %r15 -; CHECK-NEXT: leaq (%rsi,%r15,4), %r15 -; CHECK-NEXT: xorl %r12d, %r12d -; CHECK-NEXT: xorl %r13d, %r13d +; CHECK-NEXT: movslq %r9d, %r11 +; CHECK-NEXT: leaq (%rsi,%r11,4), %r11 +; CHECK-NEXT: xorl %ebx, %ebx +; CHECK-NEXT: xorl %r14d, %r14d ; CHECK-NEXT: jmp .LBB1_2 ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB1_4: # in Loop: Header=BB1_2 Depth=2 -; CHECK-NEXT: tilestored %tmm1, (%r15,%rax) -; CHECK-NEXT: incq %r13 -; CHECK-NEXT: addq $64, %r15 -; CHECK-NEXT: decq %r12 +; CHECK-NEXT: tilestored %tmm1, (%r11,%rax) +; CHECK-NEXT: incq %r14 +; CHECK-NEXT: addq $64, %r11 +; CHECK-NEXT: decq %rbx ; CHECK-NEXT: je .LBB1_5 ; CHECK-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1 ; CHECK-NEXT: # => This Inner Loop Header: Depth=2 @@ -110,46 +102,12 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind { ; CHECK-NEXT: testb %r8b, %r8b ; CHECK-NEXT: jne .LBB1_4 ; CHECK-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2 -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: tileloadd (%r10,%r9), %tmm1 -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) 
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp) -; CHECK-NEXT: tileloadd (%r11,%r9), %tmm2 +; CHECK-NEXT: tilezero %tmm1 +; CHECK-NEXT: tilezero %tmm2 ; CHECK-NEXT: tdpbf16ps %tmm2, %tmm1, %tmm0 -; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; CHECK-NEXT: movabsq $64, %rax -; CHECK-NEXT: tilestored %tmm0, 3072(%rsp,%rax) # 1024-byte Folded Spill -; CHECK-NEXT: tileloadd 3072(%rsp,%rax), %tmm1 # 1024-byte Folded Reload -; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload +; CHECK-NEXT: movabsq $64, %rbp +; CHECK-NEXT: tilestored %tmm0, 896(%rsp,%rbp) # 1024-byte Folded Spill +; CHECK-NEXT: tileloadd 896(%rsp,%rbp), %tmm1 # 1024-byte Folded Reload ; CHECK-NEXT: jmp .LBB1_4 %4 = shl i32 %2, 4 %5 = icmp eq i64 0, 0 diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll index 983c69d..95c2eda 100644 --- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll +++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll @@ -441,10 +441,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4 ; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4 -; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; CHECK-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4 -; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpsllw $2, %ymm2, %ymm2 ; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2 ; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -452,10 +452,10 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3 ; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3 -; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; CHECK-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3 -; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpsllw $2, %ymm1, %ymm1 ; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0 ; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll index cd16651..feac3dc 100644 --- a/llvm/test/CodeGen/X86/gfni-shifts.ll +++ b/llvm/test/CodeGen/X86/gfni-shifts.ll @@ -166,10 +166,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm3, %xmm4 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm3, %xmm4 -; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; GFNIAVX1OR2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm3, %xmm4 -; 
GFNIAVX1OR2-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1OR2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -177,10 +177,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; GFNIAVX1OR2-NEXT: vpsraw $4, %xmm0, %xmm3 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsraw $2, %xmm0, %xmm3 -; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; GFNIAVX1OR2-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsraw $1, %xmm0, %xmm3 -; GFNIAVX1OR2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; GFNIAVX1OR2-NEXT: vpsllw $2, %xmm1, %xmm1 ; GFNIAVX1OR2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1OR2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -896,10 +896,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -907,10 +907,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -920,10 +920,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -931,10 +931,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, 
%xmm5 +; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; GFNIAVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; GFNIAVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -949,10 +949,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -960,10 +960,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; GFNIAVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -977,10 +977,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -988,10 +988,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; GFNIAVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -2027,10 +2027,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm7, %xmm8 ; GFNIAVX1-NEXT: 
vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 ; GFNIAVX1-NEXT: vpsraw $2, %xmm7, %xmm8 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 +; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm9 +; GFNIAVX1-NEXT: vpblendvb %xmm9, %xmm8, %xmm7, %xmm7 ; GFNIAVX1-NEXT: vpsraw $1, %xmm7, %xmm8 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2038,10 +2038,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4 @@ -2051,10 +2051,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpsllw $2, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2062,10 +2062,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm0, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $2, %xmm0, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsraw $1, %xmm0, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; GFNIAVX1-NEXT: vpackuswb %xmm5, %xmm0, %xmm0 @@ -2078,10 +2078,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm6, %xmm7 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $2, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm6 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm8 +; GFNIAVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 ; GFNIAVX1-NEXT: vpsraw $1, %xmm6, %xmm7 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2089,10 +2089,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind 
{ ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; GFNIAVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 @@ -2102,10 +2102,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm5 +; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm7 +; GFNIAVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; GFNIAVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; GFNIAVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; GFNIAVX1-NEXT: vpsllw $2, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm4 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; GFNIAVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -2113,10 +2113,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX1-NEXT: vpsraw $4, %xmm1, %xmm5 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsraw $2, %xmm1, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 +; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; GFNIAVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsraw $1, %xmm1, %xmm5 -; GFNIAVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; GFNIAVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; GFNIAVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 ; GFNIAVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1 @@ -2131,10 +2131,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm5, %ymm6 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 ; GFNIAVX2-NEXT: vpsraw $2, %ymm5, %ymm6 -; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4 -; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm5 +; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm7 +; GFNIAVX2-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; GFNIAVX2-NEXT: vpsraw $1, %ymm5, %ymm6 -; GFNIAVX2-NEXT: vpaddw %ymm4, %ymm4, %ymm4 +; GFNIAVX2-NEXT: vpsllw $2, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm4, %ymm6, %ymm5, %ymm4 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2142,10 +2142,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm0, %ymm5 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $2, %ymm0, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsraw $1, %ymm0, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX2-NEXT: vpackuswb %ymm4, %ymm0, %ymm0 @@ -2155,10 +2155,10 
@@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; GFNIAVX2-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX2-NEXT: vpsraw $1, %ymm4, %ymm5 -; GFNIAVX2-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX2-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2166,10 +2166,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX2-NEXT: vpsraw $4, %ymm1, %ymm4 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsraw $2, %ymm1, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 +; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; GFNIAVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsraw $1, %ymm1, %ymm4 -; GFNIAVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 ; GFNIAVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 @@ -2185,10 +2185,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm5, %ymm6 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm5, %ymm6 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 +; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm7 +; GFNIAVX512VL-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm5, %ymm6 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2196,10 +2196,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 +; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm2, %ymm2 @@ -2209,10 +2209,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm4, %ymm5 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; GFNIAVX512VL-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; GFNIAVX512VL-NEXT: vpsraw $1, 
%ymm4, %ymm5 -; GFNIAVX512VL-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3 ; GFNIAVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -2220,10 +2220,10 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512VL-NEXT: vpsraw $4, %ymm0, %ymm4 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $2, %ymm0, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 +; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm5 +; GFNIAVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsraw $1, %ymm0, %ymm4 -; GFNIAVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; GFNIAVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; GFNIAVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; GFNIAVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0 @@ -2239,11 +2239,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 -; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 +; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5 +; GFNIAVX512BW-NEXT: vpmovb2m %zmm5, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 +; GFNIAVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 @@ -2253,11 +2253,11 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 -; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 +; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4 +; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3 -; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 +; GFNIAVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1 ; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/isel-fpclass.ll b/llvm/test/CodeGen/X86/isel-fpclass.ll index 960bbf5..df04b67 100644 --- a/llvm/test/CodeGen/X86/isel-fpclass.ll +++ b/llvm/test/CodeGen/X86/isel-fpclass.ll @@ -1,16 +1,16 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86-SDAGISEL +; RUN: llc < %s -mtriple=i686-linux | FileCheck %s -check-prefixes=X86 ; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s -check-prefixes=X64,X64-SDAGISEL ; RUN: llc < %s -mtriple=i686-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X86-FASTISEL ; RUN: llc < %s -mtriple=x86_64-linux -fast-isel -fast-isel-abort=1 | FileCheck %s -check-prefixes=X64,X64-FASTISEL +; RUN: llc < %s -mtriple=i686-linux -global-isel -global-isel-abort=2 | FileCheck %s -check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux -global-isel -global-isel-abort=2 | 
FileCheck %s -check-prefixes=X64,X64-GISEL -; FIXME: We can reuse/delete llvm/test/CodeGen/X86/is_fpclass.ll when all patches are included. - -define i1 @isnone_f(float %x) { -; X86-SDAGISEL-LABEL: isnone_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +define i1 @isnone_f(float %x) nounwind { +; X86-LABEL: isnone_f: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-LABEL: isnone_f: ; X64: # %bb.0: # %entry @@ -28,11 +28,11 @@ entry: ret i1 %0 } -define i1 @isany_f(float %x) { -; X86-SDAGISEL-LABEL: isany_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +define i1 @isany_f(float %x) nounwind { +; X86-LABEL: isany_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-LABEL: isany_f: ; X64: # %bb.0: # %entry @@ -50,17 +50,17 @@ entry: ret i1 %0 } -define i1 @issignaling_f(float %x) { -; X86-SDAGISEL-LABEL: issignaling_f: -; X86-SDAGISEL: # %bb.0: -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setl %cl -; X86-SDAGISEL-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: andb %cl, %al -; X86-SDAGISEL-NEXT: retl +define i1 @issignaling_f(float %x) nounwind { +; X86-LABEL: issignaling_f: +; X86: # %bb.0: +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setl %cl +; X86-NEXT: cmpl $2139095041, %eax # imm = 0x7F800001 +; X86-NEXT: setge %al +; X86-NEXT: andb %cl, %al +; X86-NEXT: retl ; ; X64-LABEL: issignaling_f: ; X64: # %bb.0: @@ -76,7 +76,6 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-LABEL: issignaling_f: ; X86-FASTISEL: # %bb.0: ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -87,20 +86,19 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: andb %cl, %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl %a0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1) ; "snan" ret i1 %a0 } - define i1 @isquiet_f(float %x) { -; X86-SDAGISEL-LABEL: isquiet_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: retl + define i1 @isquiet_f(float %x) nounwind { +; X86-LABEL: isquiet_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setge %al +; X86-NEXT: retl ; ; X64-LABEL: isquiet_f: ; X64: # %bb.0: # %entry @@ -113,7 +111,6 @@ define i1 @issignaling_f(float %x) { ; X86-FASTISEL-LABEL: isquiet_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -121,21 +118,20 @@ define i1 @issignaling_f(float %x) { ; 
X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 2) ; "qnan" ret i1 %0 } -define i1 @not_isquiet_f(float %x) { -; X86-SDAGISEL-LABEL: not_isquiet_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 -; X86-SDAGISEL-NEXT: setl %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isquiet_f(float %x) nounwind { +; X86-LABEL: not_isquiet_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 +; X86-NEXT: setl %al +; X86-NEXT: retl ; ; X64-LABEL: not_isquiet_f: ; X64: # %bb.0: # %entry @@ -148,7 +144,6 @@ define i1 @not_isquiet_f(float %x) { ; X86-FASTISEL-LABEL: not_isquiet_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -156,21 +151,20 @@ define i1 @not_isquiet_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2143289344, %eax # imm = 0x7FC00000 ; X86-FASTISEL-NEXT: setl %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1021) ; ~"qnan" ret i1 %0 } -define i1 @isinf_f(float %x) { -; X86-SDAGISEL-LABEL: isinf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @isinf_f(float %x) nounwind { +; X86-LABEL: isinf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: isinf_f: ; X64: # %bb.0: # %entry @@ -183,7 +177,6 @@ define i1 @isinf_f(float %x) { ; X86-FASTISEL-LABEL: isinf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -191,21 +184,20 @@ define i1 @isinf_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 516) ; 0x204 = "inf" ret i1 %0 } -define i1 @not_isinf_f(float %x) { -; X86-SDAGISEL-LABEL: not_isinf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setne %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isinf_f(float %x) nounwind { +; X86-LABEL: not_isinf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl 
$2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setne %al +; X86-NEXT: retl ; ; X64-LABEL: not_isinf_f: ; X64: # %bb.0: # %entry @@ -218,7 +210,6 @@ define i1 @not_isinf_f(float %x) { ; X86-FASTISEL-LABEL: not_isinf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -226,19 +217,18 @@ define i1 @not_isinf_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setne %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 507) ; ~0x204 = "~inf" ret i1 %0 } -define i1 @is_plus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: is_plus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @is_plus_inf_f(float %x) nounwind { +; X86-LABEL: is_plus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: is_plus_inf_f: ; X64: # %bb.0: # %entry @@ -250,25 +240,23 @@ define i1 @is_plus_inf_f(float %x) { ; X86-FASTISEL-LABEL: is_plus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 512) ; 0x200 = "+inf" ret i1 %0 } -define i1 @is_minus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: is_minus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 -; X86-SDAGISEL-NEXT: sete %al -; X86-SDAGISEL-NEXT: retl +define i1 @is_minus_inf_f(float %x) nounwind { +; X86-LABEL: is_minus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 +; X86-NEXT: sete %al +; X86-NEXT: retl ; ; X64-LABEL: is_minus_inf_f: ; X64: # %bb.0: # %entry @@ -280,25 +268,23 @@ define i1 @is_minus_inf_f(float %x) { ; X86-FASTISEL-LABEL: is_minus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000 ; X86-FASTISEL-NEXT: sete %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 4) ; "-inf" ret i1 %0 } -define i1 @not_is_minus_inf_f(float %x) { -; X86-SDAGISEL-LABEL: not_is_minus_inf_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 -; X86-SDAGISEL-NEXT: setne %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_is_minus_inf_f(float %x) nounwind { +; X86-LABEL: not_is_minus_inf_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $-8388608, {{[0-9]+}}(%esp) # imm = 0xFF800000 +; X86-NEXT: setne %al +; X86-NEXT: retl ; ; X64-LABEL: not_is_minus_inf_f: ; X64: # %bb.0: # %entry @@ -310,27 +296,25 @@ define i1 
@not_is_minus_inf_f(float %x) { ; X86-FASTISEL-LABEL: not_is_minus_inf_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $-8388608, (%esp) # imm = 0xFF800000 ; X86-FASTISEL-NEXT: setne %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 1019) ; ~"-inf" ret i1 %0 } -define i1 @isfinite_f(float %x) { -; X86-SDAGISEL-LABEL: isfinite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setl %al -; X86-SDAGISEL-NEXT: retl +define i1 @isfinite_f(float %x) nounwind { +; X86-LABEL: isfinite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setl %al +; X86-NEXT: retl ; ; X64-LABEL: isfinite_f: ; X64: # %bb.0: # %entry @@ -343,7 +327,6 @@ define i1 @isfinite_f(float %x) { ; X86-FASTISEL-LABEL: isfinite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -351,21 +334,20 @@ define i1 @isfinite_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setl %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 504) ; 0x1f8 = "finite" ret i1 %0 } -define i1 @not_isfinite_f(float %x) { -; X86-SDAGISEL-LABEL: not_isfinite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF -; X86-SDAGISEL-NEXT: andl {{[0-9]+}}(%esp), %eax -; X86-SDAGISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setge %al -; X86-SDAGISEL-NEXT: retl +define i1 @not_isfinite_f(float %x) nounwind { +; X86-LABEL: not_isfinite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF +; X86-NEXT: andl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 +; X86-NEXT: setge %al +; X86-NEXT: retl ; ; X64-LABEL: not_isfinite_f: ; X64: # %bb.0: # %entry @@ -378,7 +360,6 @@ define i1 @not_isfinite_f(float %x) { ; X86-FASTISEL-LABEL: not_isfinite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF @@ -386,19 +367,18 @@ define i1 @not_isfinite_f(float %x) { ; X86-FASTISEL-NEXT: cmpl $2139095040, %eax # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setge %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 519) ; ~0x1f8 = "~finite" ret i1 %0 } -define i1 @is_plus_finite_f(float %x) { -; X86-SDAGISEL-LABEL: is_plus_finite_f: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 -; X86-SDAGISEL-NEXT: setb %al -; X86-SDAGISEL-NEXT: 
retl +define i1 @is_plus_finite_f(float %x) nounwind { +; X86-LABEL: is_plus_finite_f: +; X86: # %bb.0: # %entry +; X86-NEXT: cmpl $2139095040, {{[0-9]+}}(%esp) # imm = 0x7F800000 +; X86-NEXT: setb %al +; X86-NEXT: retl ; ; X64-LABEL: is_plus_finite_f: ; X64: # %bb.0: # %entry @@ -410,13 +390,11 @@ define i1 @is_plus_finite_f(float %x) { ; X86-FASTISEL-LABEL: is_plus_finite_f: ; X86-FASTISEL: # %bb.0: # %entry ; X86-FASTISEL-NEXT: pushl %eax -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 8 ; X86-FASTISEL-NEXT: flds {{[0-9]+}}(%esp) ; X86-FASTISEL-NEXT: fstps (%esp) ; X86-FASTISEL-NEXT: cmpl $2139095040, (%esp) # imm = 0x7F800000 ; X86-FASTISEL-NEXT: setb %al ; X86-FASTISEL-NEXT: popl %ecx -; X86-FASTISEL-NEXT: .cfi_def_cfa_offset 4 ; X86-FASTISEL-NEXT: retl entry: %0 = tail call i1 @llvm.is.fpclass.f32(float %x, i32 448) ; 0x1c0 = "+finite" @@ -424,10 +402,10 @@ entry: } define i1 @isnone_d(double %x) nounwind { -; X86-SDAGISEL-LABEL: isnone_d: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isnone_d: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-LABEL: isnone_d: ; X64: # %bb.0: # %entry @@ -446,10 +424,10 @@ entry: } define i1 @isany_d(double %x) nounwind { -; X86-SDAGISEL-LABEL: isany_d: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isany_d: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-LABEL: isany_d: ; X64: # %bb.0: # %entry @@ -468,10 +446,10 @@ entry: } define i1 @isnone_f80(x86_fp80 %x) nounwind { -; X86-SDAGISEL-LABEL: isnone_f80: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: xorl %eax, %eax -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isnone_f80: +; X86: # %bb.0: # %entry +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl ; ; X64-SDAGISEL-LABEL: isnone_f80: ; X64-SDAGISEL: # %bb.0: # %entry @@ -491,16 +469,21 @@ define i1 @isnone_f80(x86_fp80 %x) nounwind { ; X64-FASTISEL-NEXT: fstp %st(0) ; X64-FASTISEL-NEXT: xorl %eax, %eax ; X64-FASTISEL-NEXT: retq +; +; X64-GISEL-LABEL: isnone_f80: +; X64-GISEL: # %bb.0: # %entry +; X64-GISEL-NEXT: xorl %eax, %eax +; X64-GISEL-NEXT: retq entry: %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 0) ret i1 %0 } define i1 @isany_f80(x86_fp80 %x) nounwind { -; X86-SDAGISEL-LABEL: isany_f80: -; X86-SDAGISEL: # %bb.0: # %entry -; X86-SDAGISEL-NEXT: movb $1, %al -; X86-SDAGISEL-NEXT: retl +; X86-LABEL: isany_f80: +; X86: # %bb.0: # %entry +; X86-NEXT: movb $1, %al +; X86-NEXT: retl ; ; X64-SDAGISEL-LABEL: isany_f80: ; X64-SDAGISEL: # %bb.0: # %entry @@ -520,6 +503,11 @@ define i1 @isany_f80(x86_fp80 %x) nounwind { ; X64-FASTISEL-NEXT: fstp %st(0) ; X64-FASTISEL-NEXT: movb $1, %al ; X64-FASTISEL-NEXT: retq +; +; X64-GISEL-LABEL: isany_f80: +; X64-GISEL: # %bb.0: # %entry +; X64-GISEL-NEXT: movb $1, %al +; X64-GISEL-NEXT: retq entry: %0 = tail call i1 @llvm.is.fpclass.f80(x86_fp80 %x, i32 1023) ret i1 %0 diff --git a/llvm/test/CodeGen/X86/isel-smax.ll b/llvm/test/CodeGen/X86/isel-smax.ll index 9c9a48e..1ce0a80 100644 --- a/llvm/test/CodeGen/X86/isel-smax.ll +++ b/llvm/test/CodeGen/X86/isel-smax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel 
-global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovgl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovgl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jg .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i16: +; GISEL-X64: # %bb.0: +; 
GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jg .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jg .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovgl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovgl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jg .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smax_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setg %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovgq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smax_i64: -; FASTISEL-X64: # 
%bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovgq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setg %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setg %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-smin.ll b/llvm/test/CodeGen/X86/isel-smin.ll index 7349a7c..bbed3c3 100644 --- a/llvm/test/CodeGen/X86/isel-smin.ll +++ b/llvm/test/CodeGen/X86/isel-smin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc 
< %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: smin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovll %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: smin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovll %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: smin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: smin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: smin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @smin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jl .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.smin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: 
smin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jl .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jl .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @smin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.smin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovll %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: smin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovll %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: smin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @smin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: smin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jl .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: smin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setl %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.smin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovlq %rdi, %rax ; X64-NEXT: retq ; -; 
FASTISEL-X64-LABEL: smin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovlq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: smin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setl %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: smin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jl .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: smin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jl .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: smin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @smin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: smin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setl %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.smin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umax.ll b/llvm/test/CodeGen/X86/isel-umax.ll index a90456c..990af26 100644 --- a/llvm/test/CodeGen/X86/isel-umax.ll +++ b/llvm/test/CodeGen/X86/isel-umax.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | 
FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umax_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmoval %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umax_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmoval %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umax_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umax_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umax_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umax_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: ja .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umax.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed 
$eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: ja .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: ja .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umax_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umax.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmoval %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmoval %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umax_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umax_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umax_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: ja .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umax_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: seta %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umax.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovaq 
%rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umax_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovaq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umax_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: seta %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umax_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umax_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: cmpl %eax, %ecx +; DAG-X86-NEXT: movl %esi, %edi +; DAG-X86-NEXT: sbbl %edx, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umax_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umax_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umax_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: seta %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: seta %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umax.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/isel-umin.ll b/llvm/test/CodeGen/X86/isel-umin.ll index 53a0b27..1710b9f 100644 --- a/llvm/test/CodeGen/X86/isel-umin.ll +++ b/llvm/test/CodeGen/X86/isel-umin.ll @@ -1,19 +1,19 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s 
-mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X64 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X64 -; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=FASTISEL-X86 -; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=2 | FileCheck %s --check-prefixes=X86 +; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=X64,DAG-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X64,FASTISEL-X64 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X64 +; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=X86,DAG-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -fast-isel | FileCheck %s --check-prefixes=X86,FASTISEL-X86 +; RUN: llc < %s -mtriple=i686-linux-gnu -global-isel -global-isel-abort=1 | FileCheck %s --check-prefixes=GISEL-X86 define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { -; X64-LABEL: umin_i8: -; X64: # %bb.0: -; X64-NEXT: movl %esi, %eax -; X64-NEXT: cmpb %al, %dil -; X64-NEXT: cmovbl %edi, %eax -; X64-NEXT: # kill: def $al killed $al killed $eax -; X64-NEXT: retq +; DAG-X64-LABEL: umin_i8: +; DAG-X64: # %bb.0: +; DAG-X64-NEXT: movl %esi, %eax +; DAG-X64-NEXT: cmpb %al, %dil +; DAG-X64-NEXT: cmovbl %edi, %eax +; DAG-X64-NEXT: # kill: def $al killed $al killed $eax +; DAG-X64-NEXT: retq ; ; FASTISEL-X64-LABEL: umin_i8: ; FASTISEL-X64: # %bb.0: @@ -24,6 +24,17 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; FASTISEL-X64-NEXT: # kill: def $al killed $al killed $eax ; FASTISEL-X64-NEXT: retq ; +; GISEL-X64-LABEL: umin_i8: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %esi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpb %al, %dil +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovnew %di, %ax +; GISEL-X64-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X64-NEXT: retq +; ; X86-LABEL: umin_i8: ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx @@ -35,16 +46,20 @@ define i8 @umin_i8(i8 %a, i8 %b) nounwind readnone { ; X86-NEXT: .LBB0_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i8: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpb %cl, %al -; FASTISEL-X86-NEXT: jb .LBB0_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB0_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i8: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpb %al, %cl +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB0_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB0_2: +; GISEL-X86-NEXT: # kill: def $al killed $al killed $eax +; GISEL-X86-NEXT: retl %ret = call i8 @llvm.umin.i8(i8 %a, i8 %b) ret i8 %ret } @@ -57,25 +72,28 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i16: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpw %ax, %di -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; 
FASTISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i16: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpw %si, %ax +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovew %si, %ax +; GISEL-X64-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i16: -; X86: # %bb.0: -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpw %cx, %ax -; X86-NEXT: jb .LBB1_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: .LBB1_2: -; X86-NEXT: # kill: def $ax killed $ax killed $eax -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i16: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: cmpw %cx, %ax +; DAG-X86-NEXT: jb .LBB1_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: .LBB1_2: +; DAG-X86-NEXT: # kill: def $ax killed $ax killed $eax +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i16: ; FASTISEL-X86: # %bb.0: @@ -88,6 +106,21 @@ define i16 @umin_i16(i16 %a, i16 %b) nounwind readnone { ; FASTISEL-X86-NEXT: .LBB1_2: ; FASTISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i16: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpw %ax, %cx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB1_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB1_2: +; GISEL-X86-NEXT: # kill: def $ax killed $ax killed $eax +; GISEL-X86-NEXT: retl %ret = call i16 @llvm.umin.i16(i16 %a, i16 %b) ret i16 %ret } @@ -99,12 +132,15 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X64-NEXT: cmovbl %edi, %eax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i32: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movl %esi, %eax -; FASTISEL-X64-NEXT: cmpl %esi, %edi -; FASTISEL-X64-NEXT: cmovbl %edi, %eax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i32: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movl %edi, %eax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpl %esi, %edi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmovel %esi, %eax +; GISEL-X64-NEXT: retq ; ; X86-LABEL: umin_i32: ; X86: # %bb.0: @@ -117,16 +153,19 @@ define i32 @umin_i32(i32 %a, i32 %b) nounwind readnone { ; X86-NEXT: .LBB2_2: ; X86-NEXT: retl ; -; FASTISEL-X86-LABEL: umin_i32: -; FASTISEL-X86: # %bb.0: -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; FASTISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; FASTISEL-X86-NEXT: cmpl %ecx, %eax -; FASTISEL-X86-NEXT: jb .LBB2_2 -; FASTISEL-X86-NEXT: # %bb.1: -; FASTISEL-X86-NEXT: movl %ecx, %eax -; FASTISEL-X86-NEXT: .LBB2_2: -; FASTISEL-X86-NEXT: retl +; GISEL-X86-LABEL: umin_i32: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: xorl %edx, %edx +; GISEL-X86-NEXT: cmpl %eax, %ecx +; GISEL-X86-NEXT: setb %dl +; GISEL-X86-NEXT: andl $1, %edx +; GISEL-X86-NEXT: je .LBB2_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movl %ecx, %eax +; GISEL-X86-NEXT: .LBB2_2: +; GISEL-X86-NEXT: retl %ret = call i32 @llvm.umin.i32(i32 %a, i32 %b) ret i32 %ret } @@ -138,32 +177,35 @@ define i64 
@umin_i64(i64 %a, i64 %b) nounwind readnone { ; X64-NEXT: cmovbq %rdi, %rax ; X64-NEXT: retq ; -; FASTISEL-X64-LABEL: umin_i64: -; FASTISEL-X64: # %bb.0: -; FASTISEL-X64-NEXT: movq %rsi, %rax -; FASTISEL-X64-NEXT: cmpq %rsi, %rdi -; FASTISEL-X64-NEXT: cmovbq %rdi, %rax -; FASTISEL-X64-NEXT: retq +; GISEL-X64-LABEL: umin_i64: +; GISEL-X64: # %bb.0: +; GISEL-X64-NEXT: movq %rdi, %rax +; GISEL-X64-NEXT: xorl %ecx, %ecx +; GISEL-X64-NEXT: cmpq %rsi, %rdi +; GISEL-X64-NEXT: setb %cl +; GISEL-X64-NEXT: andl $1, %ecx +; GISEL-X64-NEXT: cmoveq %rsi, %rax +; GISEL-X64-NEXT: retq ; -; X86-LABEL: umin_i64: -; X86: # %bb.0: -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %edi -; X86-NEXT: sbbl %esi, %edi -; X86-NEXT: jb .LBB3_2 -; X86-NEXT: # %bb.1: -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: movl %esi, %edx -; X86-NEXT: .LBB3_2: -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: retl +; DAG-X86-LABEL: umin_i64: +; DAG-X86: # %bb.0: +; DAG-X86-NEXT: pushl %edi +; DAG-X86-NEXT: pushl %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; DAG-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; DAG-X86-NEXT: cmpl %ecx, %eax +; DAG-X86-NEXT: movl %edx, %edi +; DAG-X86-NEXT: sbbl %esi, %edi +; DAG-X86-NEXT: jb .LBB3_2 +; DAG-X86-NEXT: # %bb.1: +; DAG-X86-NEXT: movl %ecx, %eax +; DAG-X86-NEXT: movl %esi, %edx +; DAG-X86-NEXT: .LBB3_2: +; DAG-X86-NEXT: popl %esi +; DAG-X86-NEXT: popl %edi +; DAG-X86-NEXT: retl ; ; FASTISEL-X86-LABEL: umin_i64: ; FASTISEL-X86: # %bb.0: @@ -184,6 +226,44 @@ define i64 @umin_i64(i64 %a, i64 %b) nounwind readnone { ; FASTISEL-X86-NEXT: popl %esi ; FASTISEL-X86-NEXT: popl %edi ; FASTISEL-X86-NEXT: retl +; +; GISEL-X86-LABEL: umin_i64: +; GISEL-X86: # %bb.0: +; GISEL-X86-NEXT: pushl %ebp +; GISEL-X86-NEXT: pushl %ebx +; GISEL-X86-NEXT: pushl %edi +; GISEL-X86-NEXT: pushl %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; GISEL-X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; GISEL-X86-NEXT: cmpl %eax, %esi +; GISEL-X86-NEXT: setb %bl +; GISEL-X86-NEXT: xorl %ecx, %ecx +; GISEL-X86-NEXT: cmpl %edx, %ebp +; GISEL-X86-NEXT: setb %bh +; GISEL-X86-NEXT: sete %cl +; GISEL-X86-NEXT: testl %ecx, %ecx +; GISEL-X86-NEXT: je .LBB3_2 +; GISEL-X86-NEXT: # %bb.1: +; GISEL-X86-NEXT: movb %bl, %bh +; GISEL-X86-NEXT: .LBB3_2: +; GISEL-X86-NEXT: movzbl %bh, %edi +; GISEL-X86-NEXT: andl $1, %edi +; GISEL-X86-NEXT: je .LBB3_4 +; GISEL-X86-NEXT: # %bb.3: +; GISEL-X86-NEXT: movl %esi, %eax +; GISEL-X86-NEXT: .LBB3_4: +; GISEL-X86-NEXT: testl %edi, %edi +; GISEL-X86-NEXT: je .LBB3_6 +; GISEL-X86-NEXT: # %bb.5: +; GISEL-X86-NEXT: movl %ebp, %edx +; GISEL-X86-NEXT: .LBB3_6: +; GISEL-X86-NEXT: popl %esi +; GISEL-X86-NEXT: popl %edi +; GISEL-X86-NEXT: popl %ebx +; GISEL-X86-NEXT: popl %ebp +; GISEL-X86-NEXT: retl %ret = call i64 @llvm.umin.i64(i64 %a, i64 %b) ret i64 %ret } diff --git a/llvm/test/CodeGen/X86/logic-shift.ll b/llvm/test/CodeGen/X86/logic-shift.ll index 96e63d1..104151c 100644 --- a/llvm/test/CodeGen/X86/logic-shift.ll +++ b/llvm/test/CodeGen/X86/logic-shift.ll @@ -129,10 +129,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, < ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; 
CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -140,10 +140,10 @@ define <16 x i8> @or_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, < ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -413,10 +413,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -424,10 +424,10 @@ define <16 x i8> @xor_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 @@ -697,10 +697,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm1, %xmm5 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $2, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 +; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm6 +; CHECK-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsraw $1, %xmm1, %xmm5 -; CHECK-NEXT: vpaddw %xmm4, %xmm4, %xmm4 +; CHECK-NEXT: vpsllw $2, %xmm4, %xmm4 ; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; CHECK-NEXT: vpsrlw $8, %xmm1, %xmm1 ; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -708,10 +708,10 @@ define <16 x i8> @and_ashr_commute3(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %y, ; CHECK-NEXT: vpsraw $4, %xmm0, %xmm4 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $2, %xmm0, %xmm4 -; CHECK-NEXT: 
vpaddw %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 +; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsraw $1, %xmm0, %xmm4 -; CHECK-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsllw $2, %xmm2, %xmm2 ; CHECK-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0 ; CHECK-NEXT: vpsrlw $8, %xmm0, %xmm0 ; CHECK-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/pr161693.ll b/llvm/test/CodeGen/X86/pr161693.ll new file mode 100644 index 0000000..de8188f --- /dev/null +++ b/llvm/test/CodeGen/X86/pr161693.ll @@ -0,0 +1,40 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s + +define void @PR161693() #0 { +; CHECK-LABEL: PR161693: +; CHECK: # %bb.0: # %start +; CHECK-NEXT: movzbl (%rax), %eax +; CHECK-NEXT: andb $-33, %al +; CHECK-NEXT: addb $-71, %al +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: .LBB0_1: # %loop +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: cmpb $-6, %al +; CHECK-NEXT: setb %cl +; CHECK-NEXT: leal (%rcx,%rcx), %edx +; CHECK-NEXT: orb %cl, %dl +; CHECK-NEXT: leal (,%rdx,4), %ecx +; CHECK-NEXT: orb %dl, %cl +; CHECK-NEXT: je .LBB0_1 +; CHECK-NEXT: # %bb.2: # %exit +; CHECK-NEXT: retq +start: + br label %loop + +loop: + %.val.i.i89 = load <16 x i8>, ptr poison, align 1 + %.not49.i = icmp ult <16 x i8> zeroinitializer, splat (i8 -10) + %i = and <16 x i8> %.val.i.i89, splat (i8 -33) + %i1 = add <16 x i8> %i, splat (i8 -71) + %.not51.i = icmp ult <16 x i8> %i1, splat (i8 -6) + %.not46.i = and <16 x i1> %.not49.i, %.not51.i + %i2 = bitcast <16 x i1> %.not46.i to i16 + %_0.i = icmp eq i16 %i2, 0 + br i1 %_0.i, label %loop, label %exit + +exit: + ret void +} + +attributes #0 = { "target-features"="+soft-float" } diff --git a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll index bf04c8d..63bbac12 100644 --- a/llvm/test/CodeGen/X86/prefer-avx256-shift.ll +++ b/llvm/test/CodeGen/X86/prefer-avx256-shift.ll @@ -302,10 +302,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX256-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX256-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX256-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX256-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX256-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX256-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX256-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX256-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX256-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -313,10 +313,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX256-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX256-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX256-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX256-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX256-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX256-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX256-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -338,10 +338,10 @@ define <32 x i8> 
@var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512VL-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512VL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512VL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -349,10 +349,10 @@ define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512VL-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512VL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -432,10 +432,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) { ; AVX256VL-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX256VL-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX256VL-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX256VL-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX256VL-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX256VL-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX256VL-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX256VL-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX256VL-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -443,10 +443,10 @@ define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) { ; AVX256VL-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX256VL-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX256VL-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX256VL-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX256VL-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX256VL-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll index 9c8729b3..4b8f78d 100644 --- a/llvm/test/CodeGen/X86/shuffle-as-shifts.ll +++ b/llvm/test/CodeGen/X86/shuffle-as-shifts.ll @@ -15,20 +15,20 @@ define <4 x i32> @shuf_rot_v4i32_1032(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v4i32_1032: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v4i32_1032: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; 
CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v4i32_1032: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,3,2] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 0, i32 3, i32 2> @@ -44,20 +44,20 @@ define <8 x i32> @shuf_rot_v8i32_10325476(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v8i32_10325476: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v8i32_10325476: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v8i32_10325476: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6> @@ -73,20 +73,20 @@ define <16 x i32> @shuf_rot_v16i32_1032547698111013121514(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_rot_v16i32_1032547698111013121514: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14> @@ -168,20 +168,20 @@ define <4 x i32> @shuf_shr_v4i32_1U3U(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shr_v4i32_1U3U: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef> @@ -197,20 
+197,20 @@ define <8 x i32> @shuf_shr_v8i32_1U3U5U7U(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shr_v8i32_1U3U5U7U: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef> @@ -226,20 +226,20 @@ define <16 x i32> @shuf_shr_v16i32_U3U5U7U9U11U13U15(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shr_v16i32_U3U5U7U9U11U13U15: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 1, i32 undef, i32 3, i32 undef, i32 5, i32 undef, i32 7, i32 undef, i32 9, i32 undef, i32 11, i32 undef, i32 13, i32 undef, i32 15, i32 undef> @@ -288,20 +288,20 @@ define <4 x i32> @shuf_shl_v4i32_U0U2(<4 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-ICX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-V4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v4i32_U0U2: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-ZNVER4-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <4 x i32> %x, %x %r = shufflevector <4 x i32> %x1, <4 x i32> zeroinitializer, <4 x i32> <i32 undef, i32 0, i32 undef, i32 2> @@ -317,20 +317,20 @@ define <8 x i32> @shuf_shl_v8i32_U0U2U4U6(<8 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-ICX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; 
CHECK-V4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-V4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v8i32_U0U2U4U6: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-ZNVER4-NEXT: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <8 x i32> %x, %x %r = shufflevector <8 x i32> %x1, <8 x i32> zeroinitializer, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6> @@ -346,20 +346,20 @@ define <16 x i32> @shuf_shl_v16i32_U0U2U4U6U8U10U12U14(<16 x i32> %x) { ; ; CHECK-ICX-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-ICX: # %bb.0: -; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-ICX-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ICX-NEXT: retq ; ; CHECK-V4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-V4: # %bb.0: -; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-V4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-V4-NEXT: retq ; ; CHECK-ZNVER4-LABEL: shuf_shl_v16i32_U0U2U4U6U8U10U12U14: ; CHECK-ZNVER4: # %bb.0: -; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-ZNVER4-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; CHECK-ZNVER4-NEXT: retq %x1 = add <16 x i32> %x, %x %r = shufflevector <16 x i32> %x1, <16 x i32> zeroinitializer, <16 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 undef, i32 4, i32 undef, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 undef, i32 12, i32 undef, i32 14> diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll index f91758b..10dee14 100644 --- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll +++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll @@ -602,10 +602,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X64-AVX2-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsraw $2, %xmm3, %xmm4 -; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm6 +; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsraw $1, %xmm3, %xmm4 -; X64-AVX2-NEXT: vpaddw %xmm5, %xmm5, %xmm5 +; X64-AVX2-NEXT: vpsllw $2, %xmm5, %xmm5 ; X64-AVX2-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -613,10 +613,10 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X64-AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4 ; X64-AVX2-NEXT: vpsraw $2, %xmm4, %xmm5 -; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm4 +; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm6 +; X64-AVX2-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X64-AVX2-NEXT: vpsraw $1, %xmm4, %xmm5 -; X64-AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; X64-AVX2-NEXT: vpsllw $2, %xmm1, %xmm1 ; X64-AVX2-NEXT: vpblendvb %xmm1, %xmm5, %xmm4, %xmm1 ; X64-AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1 ; X64-AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll 
b/llvm/test/CodeGen/X86/vector-fshr-128.ll index 20be579..9b52857 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -536,14 +536,14 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt) ; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm5 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm6 ; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm4 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm4 -; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4 -; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5 -; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6 +; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm5 +; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm5 +; AVX1-NEXT: vpsllw $3, %xmm4, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm3, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll index 1f16463..a387562 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll @@ -328,15 +328,15 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> % ; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7 ; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm8 -; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm5 -; AVX1-NEXT: vpsrlw $4, %xmm5, %xmm7 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpsrlw $2, %xmm5, %xmm7 -; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 -; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm7 -; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6 -; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm8, %xmm7, %xmm7 +; AVX1-NEXT: vpsrlw $4, %xmm7, %xmm8 +; AVX1-NEXT: vpblendvb %xmm6, %xmm8, %xmm7, %xmm6 +; AVX1-NEXT: vpsrlw $2, %xmm6, %xmm7 +; AVX1-NEXT: vpsllw $2, %xmm5, %xmm8 +; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm6, %xmm6 +; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm7 +; AVX1-NEXT: vpsllw $3, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm6, %xmm5 ; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm6 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm6[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm4, %xmm7 @@ -358,14 +358,14 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> % ; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm8 ; AVX1-NEXT: vpblendvb %xmm6, %xmm8, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm6 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm6 -; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm6 -; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm7 -; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm8 +; AVX1-NEXT: vpblendvb %xmm7, %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm7 +; AVX1-NEXT: vpsllw $2, %xmm6, %xmm8 +; AVX1-NEXT: vpblendvb %xmm8, %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm7 +; AVX1-NEXT: vpsllw $3, %xmm6, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm7, %xmm1, %xmm1 ; AVX1-NEXT: vpxor %xmm3, 
%xmm2, %xmm2 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm2[4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpslld $23, %xmm3, %xmm3 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll index 02f0f53..d565ef0 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -293,14 +293,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v8i16: @@ -494,10 +494,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -505,10 +505,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll index 15855e3..249bcba 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll @@ -237,29 +237,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpsraw $8, %xmm4, %xmm5 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; AVX1-NEXT: vpsraw $4, %xmm2, %xmm4 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsraw $2, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsraw $1, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; 
AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 +; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -339,29 +339,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; X86-AVX1-NEXT: vpsraw $8, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; X86-AVX1-NEXT: vpsraw $4, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsraw $2, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsraw $1, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; X86-AVX1-NEXT: vpsraw $2, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsraw $1, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsraw $8, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-AVX1-NEXT: retl ; @@ -393,10 +393,10 
@@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -404,10 +404,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -417,10 +417,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -428,10 +428,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -446,10 +446,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -457,10 +457,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, 
%ymm0 ; AVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -498,10 +498,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -509,10 +509,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -534,10 +534,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4 ; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; AVX512DQVL-NEXT: vpsraw $2, %ymm3, %ymm4 -; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; AVX512DQVL-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; AVX512DQVL-NEXT: vpsraw $1, %ymm3, %ymm4 -; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQVL-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; AVX512DQVL-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -545,10 +545,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; AVX512DQVL-NEXT: vpsraw $4, %ymm0, %ymm3 ; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsraw $2, %ymm0, %ymm3 -; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; AVX512DQVL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsraw $1, %ymm0, %ymm3 -; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQVL-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 @@ -572,10 +572,10 @@ define <32 x i8> @var_shift_v32i8(<32 
x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 ; X86-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm5 +; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm7 +; X86-AVX1-NEXT: vpblendvb %xmm7, %xmm6, %xmm5, %xmm5 ; X86-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm6, %xmm5, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -583,10 +583,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6 +; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm2 ; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 ; X86-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 @@ -596,10 +596,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $2, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm6 +; X86-AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4 ; X86-AVX1-NEXT: vpsraw $1, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; X86-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -607,10 +607,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX1-NEXT: vpsraw $4, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsraw $2, %xmm0, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 @@ -625,10 +625,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4 ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 ; X86-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4 -; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm5 +; X86-AVX2-NEXT: vpblendvb %ymm5, %ymm4, %ymm3, %ymm3 ; X86-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4 -; X86-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpsllw $2, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 ; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = 
ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -636,10 +636,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind { ; X86-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3 -; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 -; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm4 +; X86-AVX2-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3 -; X86-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; X86-AVX2-NEXT: vpsllw $2, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll index ea0745b..0fb0420 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll @@ -59,10 +59,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm5, %ymm6 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 ; AVX512DQ-NEXT: vpsraw $2, %ymm5, %ymm6 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm5 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm7 +; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm6, %ymm5, %ymm5 ; AVX512DQ-NEXT: vpsraw $1, %ymm5, %ymm6 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm6, %ymm5, %ymm3 ; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -70,10 +70,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 -; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; AVX512DQ-NEXT: vpsllw $2, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm4, %ymm2 ; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2 ; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm2, %ymm2 @@ -83,10 +83,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 -; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4 +; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6 +; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4 ; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5 -; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm3 +; AVX512DQ-NEXT: vpsllw $2, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3 ; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3 ; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] @@ -94,10 +94,10 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm4 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm4 -; AVX512DQ-NEXT: vpaddw 
%ymm1, %ymm1, %ymm1 -; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 +; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm5 +; AVX512DQ-NEXT: vpblendvb %ymm5, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm4 -; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm1 ; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0 ; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm0, %ymm0 @@ -113,11 +113,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 -; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 +; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5 +; AVX512BW-NEXT: vpmovb2m %zmm5, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4 +; AVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4 ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1} ; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2 @@ -127,11 +127,11 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind { ; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4 +; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3 -; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1 +; AVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 ; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1} ; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll index f7de8d4..c5d3297 100644 --- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll @@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v4i16: @@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsraw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; 
AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v2i16: @@ -568,10 +568,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -579,10 +579,10 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -796,10 +796,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -807,10 +807,10 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 @@ -1024,10 +1024,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 ; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm5 +; AVX-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 ; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 -; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm2, %xmm2 ; 
AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] @@ -1035,10 +1035,10 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind { ; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm4 +; AVX-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 -; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $2, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll index 1d1697a..8cb2c7b 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -262,14 +262,14 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v8i16: diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll index 3a4bb22..606adb4 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll @@ -198,29 +198,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5 -; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5 +; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4 ; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: 
vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4 +; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -300,29 +300,29 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind { ; X86-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 ; X86-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm5 -; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm2 -; X86-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 -; X86-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4 +; X86-AVX1-NEXT: vpsrlw $4, %xmm4, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3 +; X86-AVX1-NEXT: vpsrlw $2, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $2, %xmm2, %xmm5 +; X86-AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm3, %xmm3 +; X86-AVX1-NEXT: vpsrlw $1, %xmm3, %xmm4 +; X86-AVX1-NEXT: vpsllw $3, %xmm2, %xmm2 +; X86-AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 ; X86-AVX1-NEXT: vpsllw $12, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm3 ; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm4 ; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 -; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; X86-AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 -; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm1, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $2, %xmm1, %xmm4 +; X86-AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3 +; X86-AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; X86-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-AVX1-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll index 7928111..57874c4 100644 --- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll @@ -196,14 +196,14 @@ define <4 x i16> @var_shift_v4i16(<4 x i16> %a, <4 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; 
AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v4i16: @@ -367,14 +367,14 @@ define <2 x i16> @var_shift_v2i16(<2 x i16> %a, <2 x i16> %b) nounwind { ; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: var_shift_v2i16: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll index dbbfaab..be41945 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll @@ -8079,14 +8079,14 @@ define <16 x i16> @pr43230(<16 x i16> %a, <16 x i16> %b) { ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm3 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 +; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $2, %xmm1, %xmm3 +; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX1-NEXT: vpsllw $3, %xmm1, %xmm1 +; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir index 5d644c3..718fa6f 100644 --- a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir +++ b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir @@ -366,7 +366,8 @@ frameInfo: maxCallFrameSize: 0 localFrameSize: 144 machineFunctionInfo: - stackSizeSVE: 0 + stackSizeZPR: 0 + stackSizePPR: 0 stack: - { id: 0, name: StackGuardSlot, offset: -40, size: 8, alignment: 8, stack-id: default, local-offset: -8 } diff --git a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir index 013d933..b7a9892 100644 --- 
a/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir +++ b/llvm/test/DebugInfo/AArch64/compiler-gen-bbs-livedebugvalues.mir @@ -69,7 +69,8 @@ frameInfo: hasCalls: true maxCallFrameSize: 0 machineFunctionInfo: - stackSizeSVE: 0 + stackSizeZPR: 0 + stackSizePPR: 0 stack: - { id: 0, type: spill-slot, offset: -20, size: 4, alignment: 4, stack-id: default } - { id: 1, type: spill-slot, offset: -8, size: 8, alignment: 8, stack-id: default, diff --git a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll index c9148ca4..f893597 100644 --- a/llvm/test/DebugInfo/X86/dynamic-bitfield.ll +++ b/llvm/test/DebugInfo/X86/dynamic-bitfield.ll @@ -27,7 +27,7 @@ source_filename = "bitfield.c" !6 = !{} !7 = !{!0, !2} !8 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "PackedBits", file: !5, line: 3, size: 40, elements: !9) -!9 = !{!10, !12, !16} +!9 = !{!10, !12, !16, !21} !10 = !DIDerivedType(tag: DW_TAG_member, name: "a", scope: !8, file: !5, line: 5, baseType: !11, size: 8) ; CHECK: DW_TAG_member ; CHECK-NEXT: DW_AT_name{{.*}}"a" @@ -60,5 +60,14 @@ source_filename = "bitfield.c" ; CHECK: DW_AT_bit_size [DW_FORM_exprloc] (DW_OP_lit27) ; CHECK-NEXT: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit13) ; CHECK-NOT: DW_AT_data_member_location -; CHECK: DW_TAG !20 = !{!"clang version 3.9.0 (trunk 267633)"} +!21 = !DIDerivedType(tag: DW_TAG_member, name: "d", scope: !8, file: !5, line: 7, baseType: !13, offset: !DIExpression(DW_OP_constu, 15), flags: DIFlagBitField) +; CHECK: DW_TAG_member +; CHECK-NEXT: DW_AT_name{{.*}}"d" +; CHECK-NOT: DW_TAG +; CHECK-NOT: DW_AT_bit_offset +; CHECK-NOT: DW_AT_byte_size +; CHECK-NOT: DW_AT_bit_size +; CHECK: DW_AT_data_bit_offset [DW_FORM_exprloc] (DW_OP_lit15) +; CHECK-NOT: DW_AT_data_member_location +; CHECK: DW_TAG diff --git a/llvm/test/DebugInfo/symbolize-build-id.test b/llvm/test/DebugInfo/symbolize-build-id.test index d63f43f..2620718 100644 --- a/llvm/test/DebugInfo/symbolize-build-id.test +++ b/llvm/test/DebugInfo/symbolize-build-id.test @@ -21,6 +21,7 @@ Sections: Type: SHT_NOTE Flags: [ SHF_ALLOC ] Content: 040000000800000003000000474e5500abb50d82b6bdc861 + AddressAlign: 4 ProgramHeaders: - Type: PT_NOTE Flags: [ PF_R ] diff --git a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s index 9296f04..ed76a28 100644 --- a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s +++ b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s @@ -22,7 +22,7 @@ # CHECK-OBJ: Contents of section .rodata: # CHECK-OBJ: 0000 48310048 32004833 00 H1.H2.H3. 
-# CHECK-LG: Starting link phase 1 for graph +# CHECK-LG: Starting link phase 1 # CHECK-LG: section .rodata: # CHECK-LG: block 0x0 size = 0x00000009, align = 1, alignment-offset = 0 diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s new file mode 100644 index 0000000..557e403 --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-0.s @@ -0,0 +1,7 @@ + .section __DATA,__data + .globl x + .p2align 2, 0x0 +x: + .long 0 + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s new file mode 100644 index 0000000..711c8a0 --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/Inputs/x-1.s @@ -0,0 +1,7 @@ + .section __DATA,__data + .globl x + .p2align 2, 0x0 +x: + .long 1 + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s new file mode 100644 index 0000000..c58f84e --- /dev/null +++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_universal_slice_selection.s @@ -0,0 +1,32 @@ +# RUN: rm -rf %t && mkdir -p %t +# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/main.o %s +# RUN: llvm-mc -triple=arm64-apple-darwin -filetype=obj -o %t/x.arm64.o \ +# RUN: %S/Inputs/x-1.s +# RUN: llvm-ar crs %t/libX.arm64.a %t/x.arm64.o +# RUN: llvm-mc -triple=arm64e-apple-darwin -filetype=obj -o %t/x.arm64e.o \ +# RUN: %S/Inputs/x-0.s +# RUN: llvm-ar crs %t/libX.arm64e.a %t/x.arm64e.o +# RUN: llvm-lipo --create --output %t/libX.a %t/libX.arm64.a %t/libX.arm64e.a +# RUN: llvm-jitlink -noexec -check=%s %t/main.o -L%t -lX +# +# Create a universal archive with two slices (arm64e, arm64), each containing +# a definition of X: in arm64e X = 0, in arm64 X = 1. +# Check that, if we load an arm64e object file, we link the arm64e slice +# of the archive, by verifying that X = 0.
+# + +# jitlink-check: *{4}x = 0 + + .section __TEXT,__text,regular,pure_instructions + .globl _main + .p2align 2 +_main: + mov w0, #0 + ret + + .section __DATA,__data + .globl p +p: + .quad x + +.subsections_via_symbols diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s index 2b5c9e3..5f6babf 100644 --- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s +++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s @@ -102,7 +102,7 @@ p: call o .size p, .-p -# CHECK: Link graph "{{.*}}" before copy-and-fixup: +# CHECK: Link graph before copy-and-fixup: # CHECK: section .text: # CHECK: block 0x1000 # CHECK: symbols: diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s index 3bbfd55..c31250b 100644 --- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s +++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s @@ -131,7 +131,7 @@ p: call o .size p, .-p -# CHECK: Link graph "{{.*}}" before copy-and-fixup: +# CHECK: Link graph before copy-and-fixup: # CHECK: section .text: # CHECK: block 0x1000 # CHECK: symbols: diff --git a/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s b/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s index 3c69361..80a340c 100644 --- a/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s +++ b/llvm/test/MC/AMDGPU/hsa-gfx1250-v4.s @@ -178,6 +178,7 @@ max_vgprs: // ASM-NEXT: .amdhsa_next_free_sgpr 32 // ASM-NEXT: .amdhsa_named_barrier_count 3 // ASM-NEXT: .amdhsa_reserve_vcc 0 +// ASM-NEXT: .amdhsa_reserve_xnack_mask 1 // ASM-NEXT: .amdhsa_float_round_mode_32 1 // ASM-NEXT: .amdhsa_float_round_mode_16_64 1 // ASM-NEXT: .amdhsa_float_denorm_mode_32 1 diff --git a/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s b/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s index 776006b..642e62d 100644 --- a/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s +++ b/llvm/test/MC/AMDGPU/hsa-gfx1251-v4.s @@ -178,6 +178,7 @@ max_vgprs: // ASM-NEXT: .amdhsa_next_free_sgpr 32 // ASM-NEXT: .amdhsa_named_barrier_count 3 // ASM-NEXT: .amdhsa_reserve_vcc 0 +// ASM-NEXT: .amdhsa_reserve_xnack_mask 1 // ASM-NEXT: .amdhsa_float_round_mode_32 1 // ASM-NEXT: .amdhsa_float_round_mode_16_64 1 // ASM-NEXT: .amdhsa_float_denorm_mode_32 1 diff --git a/llvm/test/TableGen/RegisterClassCopyCost.td b/llvm/test/TableGen/RegisterClassCopyCost.td new file mode 100644 index 0000000..fc65fdb --- /dev/null +++ b/llvm/test/TableGen/RegisterClassCopyCost.td @@ -0,0 +1,31 @@ +// RUN: llvm-tblgen --gen-register-info -I %p/../../include %s 2>&1 | FileCheck %s +// RUN: not llvm-tblgen --gen-register-info -I %p/../../include -DERROR %s 2>&1 | FileCheck -check-prefix=ERROR %s + +// Check that there is no assertion when specifying unsupported +// CopyCost values on register classes. Check that negative CopyCost +// values are saturated to 255. 
+ +include "llvm/Target/Target.td" + +// CHECK: extern const MCRegisterClass MyTargetMCRegisterClasses[] = { +// CHECK-NEXT: { GPR32, GPR32Bits, 0, 2, sizeof(GPR32Bits), MyTarget::GPR32RegClassID, 32, 1, true, false }, +// CHECK-NEXT: { SPECIAL_CLASS, SPECIAL_CLASSBits, 6, 1, sizeof(SPECIAL_CLASSBits), MyTarget::SPECIAL_CLASSRegClassID, 32, 255, true, false }, +// CHECK-NEXT: }; + +def MyTargetISA : InstrInfo; +def MyTarget : Target { let InstructionSet = MyTargetISA; } + +def R0 : Register<"r0"> { let Namespace = "MyTarget"; } +def R1 : Register<"r1"> { let Namespace = "MyTarget"; } +def SPECIAL : Register<"special"> { let Namespace = "MyTarget"; } + +// ERROR: :[[@LINE+1]]:5: error: 'CopyCost' must be an 8-bit value +def GPR32 : RegisterClass<"MyTarget", [i32], 32, (add R0, R1)> { +#ifdef ERROR + let CopyCost = 256; +#endif +} + +def SPECIAL_CLASS : RegisterClass<"MyTarget", [i32], 32, (add SPECIAL)> { + let CopyCost = -1; +} diff --git a/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll b/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll new file mode 100644 index 0000000..512ea37 --- /dev/null +++ b/llvm/test/Transforms/GVN/masked-load-store-no-mem-dep.ll @@ -0,0 +1,34 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=gvn -S -enable-gvn-memdep=true < %s | FileCheck %s +; RUN: opt -passes=gvn -S -enable-gvn-memdep=false < %s | FileCheck %s --check-prefix=MEMDEPFALSE + +define <4 x float> @forward_binop_with_sel(ptr %0, ptr %1, i32 %a, i32 %b, <4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel( +; CHECK-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[FMUL]], <4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <4 x float> [[TMP3]] +; +; MEMDEPFALSE-LABEL: @forward_binop_with_sel( +; MEMDEPFALSE-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; MEMDEPFALSE-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; MEMDEPFALSE-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; MEMDEPFALSE-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; MEMDEPFALSE-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; MEMDEPFALSE-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; MEMDEPFALSE-NEXT: [[LOAD_1_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP1]], i32 1, <4 x i1> [[MASK]], <4 x float> [[PASSTHROUGH:%.*]]) +; MEMDEPFALSE-NEXT: ret <4 x float> [[LOAD_1_0]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %a, i32 %b) + %load.0.0 = call <4 x float> 
@llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> %passthrough) + ret <4 x float> %load.1.0 +} diff --git a/llvm/test/Transforms/GVN/masked-load-store.ll b/llvm/test/Transforms/GVN/masked-load-store.ll index 984a756..b112e99 100644 --- a/llvm/test/Transforms/GVN/masked-load-store.ll +++ b/llvm/test/Transforms/GVN/masked-load-store.ll @@ -36,6 +36,180 @@ define <128 x i8> @f1(ptr %a0, <128 x i8> %a1, <128 x i8> %a2) { ret <128 x i8> %v4 } -declare <128 x i8> @llvm.masked.load.v128i8.p0(ptr, i32, <128 x i1>, <128 x i8>) -declare void @llvm.masked.store.v128i8.p0(<128 x i8>, ptr, i32, <128 x i1>) +define <4 x float> @forward_masked_load(ptr %0, ptr %1) { +; CHECK-LABEL: @forward_masked_load( +; CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true)) +; CHECK-NEXT: ret <4 x float> [[TMP4]] +; + %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4) + %load1 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + call void @llvm.masked.store.v4f32.p0(<4 x float> %load1, ptr %1, i32 1, <4 x i1> %mask) + %load2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load2 +} + +define <4 x float> @forward_masked_load_arbitrary_mask(ptr %loc_a, ptr %loc_b, <4 x i1> %mask) { +; CHECK-LABEL: @forward_masked_load_arbitrary_mask( +; CHECK-NEXT: [[LOAD1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[LOC_A:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[LOAD1]], ptr [[LOC_B:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP1:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[LOAD1]], <4 x float> zeroinitializer +; CHECK-NEXT: ret <4 x float> [[TMP1]] +; + %load1 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %loc_a, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + call void @llvm.masked.store.v4f32.p0(<4 x float> %load1, ptr %loc_b, i32 1, <4 x i1> %mask) + %load2 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %loc_b, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load2 +} + +define <4 x float> @forward_binop_splat_i1_mask(ptr %0, ptr %1) { +; CHECK-LABEL: @forward_binop_splat_i1_mask( +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> splat (i1 true), <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> splat (i1 true)) +; CHECK-NEXT: 
ret <4 x float> [[FMUL]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 0, i32 4) + %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + ret <4 x float> %load.1.0 +} + +define <4 x float> @forward_binop_with_sel(ptr %0, ptr %1, i32 %a, i32 %b, <4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel( +; CHECK-NEXT: [[MASK:%.*]] = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP0:%.*]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[GEP_0_16]], i32 1, <4 x i1> [[MASK]], <4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[MASK]], <4 x float> [[FMUL]], <4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <4 x float> [[TMP3]] +; + %mask = tail call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %a, i32 %b) + %load.0.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %gep.0.16, i32 1, <4 x i1> %mask, <4 x float> zeroinitializer) + %fmul = fmul <4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.v4f32.p0(<4 x float> %fmul, ptr %1, i32 1, <4 x i1> %mask) + %load.1.0 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %1, i32 1, <4 x i1> %mask, <4 x float> %passthrough) + ret <4 x float> %load.1.0 +} + +define <vscale x 4 x float> @forward_masked_load_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_masked_load_scalable( +; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[PASSTHROUGH]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x 
float> %passthrough) + ret <vscale x 4 x float> %load2 +} +define <vscale x 4 x float> @forward_masked_load_scalable_type_mismatch(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_masked_load_scalable_type_mismatch( +; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD1:%.*]] = call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x double> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> [[LOAD1]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK]]) +; CHECK-NEXT: [[LOAD2:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD2]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x double> zeroinitializer) + call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load2 +} + +define <vscale x 4 x float> @generate_sel_with_passthrough(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @generate_sel_with_passthrough( +; CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[TMP4]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = select <vscale x 4 x i1> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]] +; + %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load1 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %load1, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load2 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load2 +} + +define <vscale x 4 x float> @forward_binop_with_sel_scalable(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @forward_binop_with_sel_scalable( +; CHECK-NEXT: [[MASK:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void 
@llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK]]) +; CHECK-NEXT: [[TMP3:%.*]] = select <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[FMUL]], <vscale x 4 x float> [[PASSTHROUGH:%.*]] +; CHECK-NEXT: ret <vscale x 4 x float> [[TMP3]] +; + %mask = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} + +define <vscale x 4 x float> @load_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @load_mask_differs( +; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) +; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]]) +; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK1]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]] +; + %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) + %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask0) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask1, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} + +define <vscale x 4 x float> @store_mask_differs(ptr %0, ptr %1, <vscale x 4 x float> %passthrough) { +; CHECK-LABEL: @store_mask_differs( +; CHECK-NEXT: [[MASK0:%.*]] = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) +; CHECK-NEXT: [[MASK1:%.*]] = tail call <vscale x 4 x i1> 
@llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) +; CHECK-NEXT: [[LOAD_0_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP0:%.*]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[GEP_0_16:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 +; CHECK-NEXT: [[LOAD_0_16:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[GEP_0_16]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> zeroinitializer) +; CHECK-NEXT: [[FMUL:%.*]] = fmul <vscale x 4 x float> [[LOAD_0_0]], [[LOAD_0_16]] +; CHECK-NEXT: call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[FMUL]], ptr [[TMP1:%.*]], i32 1, <vscale x 4 x i1> [[MASK1]]) +; CHECK-NEXT: [[LOAD_1_0:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[MASK0]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]) +; CHECK-NEXT: ret <vscale x 4 x float> [[LOAD_1_0]] +; + %mask0 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 8) + %mask1 = tail call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 0, i32 4) + %load.0.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %0, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %gep.0.16 = getelementptr i8, ptr %0, i32 16 + %load.0.16 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %gep.0.16, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> zeroinitializer) + %fmul = fmul <vscale x 4 x float> %load.0.0, %load.0.16 + call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %fmul, ptr %1, i32 1, <vscale x 4 x i1> %mask1) + %load.1.0 = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %1, i32 1, <vscale x 4 x i1> %mask0, <vscale x 4 x float> %passthrough) + ret <vscale x 4 x float> %load.1.0 +} diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll index 119cffd..d94e78c 100644 --- a/llvm/test/Transforms/InstCombine/fcmp.ll +++ b/llvm/test/Transforms/InstCombine/fcmp.ll @@ -1812,6 +1812,46 @@ define i1 @fcmp_ule_fsub_const(float %x, float %y) { ret i1 %cmp } +define i1 @fcmp_ninf_ule_fsub_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ninf_ule_fsub_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub float %x, %y + %cmp = fcmp ninf ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_nnan_ule_fsub_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_nnan_ule_fsub_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub float %x, %y + %cmp = fcmp nnan ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_ule_fsub_ninf_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ule_fsub_ninf_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp ninf ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub ninf float %x, %y + %cmp = fcmp ule float %fs, 0.000000e+00 + ret i1 %cmp +} + +define i1 @fcmp_ule_fsub_nnan_const(float %x, float %y) { +; CHECK-LABEL: @fcmp_ule_fsub_nnan_const( +; CHECK-NEXT: [[CMP:%.*]] = fcmp nnan ule float [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %fs = fsub nnan float %x, %y + %cmp = fcmp ule float %fs, 0.000000e+00 + ret i1 %cmp +} + define i1 @fcmp_ugt_fsub_const(float %x, float %y) { ; CHECK-LABEL: @fcmp_ugt_fsub_const( ; CHECK-NEXT: [[FS:%.*]] = fsub float [[X:%.*]], [[Y:%.*]] diff --git a/llvm/test/Transforms/InstCombine/icmp-clamp.ll 
b/llvm/test/Transforms/InstCombine/icmp-clamp.ll new file mode 100644 index 0000000..4866dbf --- /dev/null +++ b/llvm/test/Transforms/InstCombine/icmp-clamp.ll @@ -0,0 +1,295 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt < %s -passes=instcombine -S | FileCheck %s + +declare void @use(i32) + +define i1 @test_i32_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_ne(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_ne( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -161 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_eq_no_add(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_eq_no_add( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], 161 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_i32_ne_no_add(i32 %x) { +; CHECK-LABEL: define i1 @test_i32_ne_no_add( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 160 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 0) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_unsigned_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_unsigned_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -10 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 91 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 100) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +define i1 @test_unsigned_ne(i32 %x) { +; CHECK-LABEL: define i1 @test_unsigned_ne( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], -101 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], -91 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 100) + %cmp = icmp ne i32 %v2, %x + ret i1 %cmp +} + + +; Different bit widths +define i1 @test_i8_eq(i8 %x) { +; CHECK-LABEL: define i1 @test_i8_eq( +; CHECK-SAME: i8 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 50 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[TMP1]], 101 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i8 @llvm.smax.i8(i8 %x, i8 -50) + %v2 = tail call i8 @llvm.smin.i8(i8 %v1, i8 50) + %cmp = icmp eq i8 %v2, %x + ret i1 %cmp +} + +define i1 @test_i16_eq(i16 %x) { +; CHECK-LABEL: define i1 @test_i16_eq( +; CHECK-SAME: i16 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[X]], 1000 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i16 [[TMP1]], 2001 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i16 @llvm.smax.i16(i16 %x, i16 -1000) + %v2 = tail call i16 @llvm.smin.i16(i16 %v1, i16 1000) + %cmp = icmp eq i16 %v2, %x + ret i1 %cmp +} + +define i1 @test_i64_eq(i64 %x) { 
+; CHECK-LABEL: define i1 @test_i64_eq( +; CHECK-SAME: i64 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[X]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP1]], -1 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i64 @llvm.smax.i64(i64 %x, i64 -1) + %v2 = tail call i64 @llvm.smin.i64(i64 %v1, i64 9223372036854775806) + %cmp = icmp eq i64 %v2, %x + ret i1 %cmp +} + +; Negative tests - wrong predicate +define i1 @test_wrong_pred_slt(i32 %x) { +; CHECK-LABEL: define i1 @test_wrong_pred_slt( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], 160 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp slt i32 %v2, %x + ret i1 %cmp +} + + +; Negative tests - not a clamp pattern +define i1 @test_not_clamp_pattern(i32 %x, i32 %y) { +; CHECK-LABEL: define i1 @test_not_clamp_pattern( +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[Y]], i32 -95) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %y, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Lo >= Hi +define i1 @test_invalid_range(i32 %x) { +; CHECK-LABEL: define i1 @test_invalid_range( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], 50 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 100) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 50) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Lo is minimum signed value +define i1 @test_lo_min_signed(i32 %x) { +; CHECK-LABEL: define i1 @test_lo_min_signed( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X]], 161 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -2147483648) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Hi is maximum signed value +define i1 @test_hi_max_signed(i32 %x) { +; CHECK-LABEL: define i1 @test_hi_max_signed( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], -96 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 2147483647) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Negative tests - Hi is maximum unsigned value +define i1 @test_hi_max_unsigned(i32 %x) { +; CHECK-LABEL: define i1 @test_hi_max_unsigned( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X]], 9 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.umax.i32(i32 %x, i32 10) + %v2 = tail call i32 @llvm.umin.i32(i32 %v1, i32 4294967295) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Multi-use tests - multiple uses of max +define i1 @test_multi_use_max(i32 %x) { +; CHECK-LABEL: define i1 @test_multi_use_max( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95) +; CHECK-NEXT: call void @use(i32 [[V1]]) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + call void @use(i32 %v1) + %v2 = 
tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Multi-use tests - multiple uses of min +define i1 @test_multi_use_min(i32 %x) { +; CHECK-LABEL: define i1 @test_multi_use_min( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X]], i32 -95) +; CHECK-NEXT: [[V2:%.*]] = tail call i32 @llvm.smin.i32(i32 [[V1]], i32 160) +; CHECK-NEXT: call void @use(i32 [[V2]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V2]], [[X]] +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + call void @use(i32 %v2) + %cmp = icmp eq i32 %v2, %x + ret i1 %cmp +} + +; Commuted tests +define i1 @test_commuted_eq(i32 %x) { +; CHECK-LABEL: define i1 @test_commuted_eq( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[X]], 95 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], 256 +; CHECK-NEXT: ret i1 [[CMP]] +; + %v1 = tail call i32 @llvm.smax.i32(i32 %x, i32 -95) + %v2 = tail call i32 @llvm.smin.i32(i32 %v1, i32 160) + %cmp = icmp eq i32 %x, %v2 + ret i1 %cmp +} + + +; Vector tests - splat constants +define <2 x i1> @test_vec_splat_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_splat_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[X]], splat (i32 50) +; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i32> [[TMP1]], splat (i32 101) +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 -50>) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 50>) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} + +; Vector tests - poison elements +define <2 x i1> @test_vec_poison_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_poison_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> <i32 -50, i32 poison>) +; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> <i32 50, i32 poison>) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 poison>) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 poison>) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} + +; Vector tests - non-splat +define <2 x i1> @test_vec_non_splat_eq(<2 x i32> %x) { +; CHECK-LABEL: define <2 x i1> @test_vec_non_splat_eq( +; CHECK-SAME: <2 x i32> [[X:%.*]]) { +; CHECK-NEXT: [[V1:%.*]] = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> [[X]], <2 x i32> <i32 -50, i32 -30>) +; CHECK-NEXT: [[V2:%.*]] = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[V1]], <2 x i32> <i32 50, i32 70>) +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[V2]], [[X]] +; CHECK-NEXT: ret <2 x i1> [[CMP]] +; + %v1 = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %x, <2 x i32> <i32 -50, i32 -30>) + %v2 = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %v1, <2 x i32> <i32 50, i32 70>) + %cmp = icmp eq <2 x i32> %v2, %x + ret <2 x i1> %cmp +} diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll new file mode 100644 index 0000000..1339afe --- /dev/null +++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights-freq.ll @@ -0,0 +1,75 @@ +; Test branch weight metadata, estimated 
trip count metadata, and block +; frequencies after loop peeling. + +; RUN: opt < %s -S -passes='print<block-freq>' 2>&1 | \ +; RUN: FileCheck -check-prefix=CHECK %s + +; The -implicit-check-not options make sure that no additional labels or calls +; to @f show up. +; RUN: opt < %s -S -passes='loop-unroll,print<block-freq>' \ +; RUN: -unroll-force-peel-count=2 2>&1 | \ +; RUN: FileCheck %s -check-prefix=CHECK-UR \ +; RUN: -implicit-check-not='{{^[^ ;]*:}}' \ +; RUN: -implicit-check-not='call void @f' + +; CHECK: block-frequency-info: test +; CHECK: do.body: float = 10.0, + +; The sum should still be ~10. +; +; CHECK-UR: block-frequency-info: test +; CHECK-UR: - [[DO_BODY_PEEL:.*]]: float = 1.0, +; CHECK-UR: - [[DO_BODY_PEEL2:.*]]: float = 0.9, +; CHECK-UR: - [[DO_BODY:.*]]: float = 8.1, + +declare void @f(i32) + +define void @test(i32 %n) { +; CHECK-UR-LABEL: define void @test( +; CHECK-UR: [[ENTRY:.*]]: +; CHECK-UR: br label %[[DO_BODY_PEEL_BEGIN:.*]] +; CHECK-UR: [[DO_BODY_PEEL_BEGIN]]: +; CHECK-UR: br label %[[DO_BODY_PEEL:.*]] +; CHECK-UR: [[DO_BODY_PEEL]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END:.*]], label %[[DO_BODY_PEEL_NEXT:.*]], !prof ![[#PROF:]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT]]: +; CHECK-UR: br label %[[DO_BODY_PEEL2:.*]] +; CHECK-UR: [[DO_BODY_PEEL2]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END]], label %[[DO_BODY_PEEL_NEXT1:.*]], !prof ![[#PROF]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT1]]: +; CHECK-UR: br label %[[DO_BODY_PEEL_NEXT5:.*]] +; CHECK-UR: [[DO_BODY_PEEL_NEXT5]]: +; CHECK-UR: br label %[[ENTRY_PEEL_NEWPH:.*]] +; CHECK-UR: [[ENTRY_PEEL_NEWPH]]: +; CHECK-UR: br label %[[DO_BODY]] +; CHECK-UR: [[DO_BODY]]: +; CHECK-UR: call void @f +; CHECK-UR: br i1 %{{.*}}, label %[[DO_END_LOOPEXIT:.*]], label %[[DO_BODY]], !prof ![[#PROF]], !llvm.loop ![[#LOOP_UR_LATCH:]] +; CHECK-UR: [[DO_END_LOOPEXIT]]: +; CHECK-UR: br label %[[DO_END]] +; CHECK-UR: [[DO_END]]: +; CHECK-UR: ret void + +entry: + br label %do.body + +do.body: + %i = phi i32 [ 0, %entry ], [ %inc, %do.body ] + %inc = add i32 %i, 1 + call void @f(i32 %i) + %c = icmp sge i32 %inc, %n + br i1 %c, label %do.end, label %do.body, !prof !0 + +do.end: + ret void +} + +!0 = !{!"branch_weights", i32 1, i32 9} + +; CHECK-UR: ![[#PROF]] = !{!"branch_weights", i32 1, i32 9} +; CHECK-UR: ![[#LOOP_UR_LATCH]] = distinct !{![[#LOOP_UR_LATCH]], ![[#LOOP_UR_PC:]], ![[#LOOP_UR_TC:]], ![[#DISABLE:]]} +; CHECK-UR: ![[#LOOP_UR_PC]] = !{!"llvm.loop.peeled.count", i32 2} +; CHECK-UR: ![[#LOOP_UR_TC]] = !{!"llvm.loop.estimated_trip_count", i32 8} +; CHECK-UR: ![[#DISABLE]] = !{!"llvm.loop.unroll.disable"} diff --git a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll index c58f8f1..63a0dd4 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-branch-weights.ll @@ -15,9 +15,9 @@ define void @test() { ; CHECK: loop.peel: ; CHECK-NEXT: [[X_PEEL:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X_PEEL]], label [[LOOP_LATCH_PEEL:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT:%.*]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] ; CHECK-NEXT: ], !prof [[PROF0:![0-9]+]] ; CHECK: loop.latch.peel: ; CHECK-NEXT: br label [[LOOP_PEEL_NEXT:%.*]] @@ -26,10 +26,10 @@ define void @test() { ; CHECK: loop.peel2: 
; CHECK-NEXT: [[X_PEEL3:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X_PEEL3]], label [[LOOP_LATCH_PEEL4:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] -; CHECK-NEXT: ], !prof [[PROF1:![0-9]+]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH_PEEL4]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT]] +; CHECK-NEXT: ], !prof [[PROF0]] ; CHECK: loop.latch.peel4: ; CHECK-NEXT: br label [[LOOP_PEEL_NEXT1:%.*]] ; CHECK: loop.peel.next1: @@ -41,31 +41,33 @@ define void @test() { ; CHECK: loop: ; CHECK-NEXT: [[X:%.*]] = call i32 @get.x() ; CHECK-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [ -; CHECK-NEXT: i32 0, label [[LOOP_LATCH]] -; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]] -; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]] -; CHECK-NEXT: ], !prof [[PROF2:![0-9]+]] +; CHECK-NEXT: i32 0, label [[LOOP_LATCH]] +; CHECK-NEXT: i32 1, label [[LOOP_EXIT_LOOPEXIT:%.*]] +; CHECK-NEXT: i32 2, label [[LOOP_EXIT_LOOPEXIT]] +; CHECK-NEXT: ], !prof [[PROF0]] ; CHECK: loop.latch: -; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br label [[LOOP]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: loop.exit.loopexit: ; CHECK-NEXT: br label [[LOOP_EXIT]] ; CHECK: loop.exit: ; CHECK-NEXT: ret void +; +; DISABLEADV-LABEL: @test( +; DISABLEADV-NEXT: entry: +; DISABLEADV-NEXT: br label [[LOOP:%.*]] +; DISABLEADV: loop: +; DISABLEADV-NEXT: [[X:%.*]] = call i32 @get.x() +; DISABLEADV-NEXT: switch i32 [[X]], label [[LOOP_LATCH:%.*]] [ +; DISABLEADV-NEXT: i32 0, label [[LOOP_LATCH]] +; DISABLEADV-NEXT: i32 1, label [[LOOP_EXIT:%.*]] +; DISABLEADV-NEXT: i32 2, label [[LOOP_EXIT]] +; DISABLEADV-NEXT: ], !prof [[PROF0:![0-9]+]] +; DISABLEADV: loop.latch: +; DISABLEADV-NEXT: br label [[LOOP]] +; DISABLEADV: loop.exit: +; DISABLEADV-NEXT: ret void +; -; DISABLEADV-LABEL: @test() -; DISABLEADV-NEXT: entry: -; DISABLEADV-NEXT: br label %loop -; DISABLEADV: loop -; DISABLEADV-NEXT: %x = call i32 @get.x() -; DISABLEADV-NEXT: switch i32 %x, label %loop.latch [ -; DISABLEADV-NEXT: i32 0, label %loop.latch -; DISABLEADV-NEXT: i32 1, label %loop.exit -; DISABLEADV-NEXT: i32 2, label %loop.exit -; DISABLEADV-NEXT: ], !prof !0 -; DISABLEADV: loop.latch: -; DISABLEADV-NEXT: br label %loop -; DISABLEADV: loop.exit: -; DISABLEADV-NEXT: ret void entry: br label %loop @@ -89,9 +91,9 @@ loop.exit: ;. ; CHECK: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10} -; CHECK: [[PROF1]] = !{!"branch_weights", i32 90, i32 180, i32 20, i32 10} -; CHECK: [[PROF2]] = !{!"branch_weights", i32 80, i32 160, i32 20, i32 10} -; CHECK: [[LOOP3]] = distinct !{!3, !4, !5} -; CHECK: [[META4:![0-9]+]] = !{!"llvm.loop.peeled.count", i32 2} -; CHECK: [[META5:![0-9]+]] = !{!"llvm.loop.unroll.disable"} +; CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]} +; CHECK: [[META2]] = !{!"llvm.loop.peeled.count", i32 2} +; CHECK: [[META3]] = !{!"llvm.loop.unroll.disable"} +;. +; DISABLEADV: [[PROF0]] = !{!"branch_weights", i32 100, i32 200, i32 20, i32 10} ;. 
diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll index d91cb5b..e951215 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo-deopt.ll @@ -15,13 +15,13 @@ ; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT0]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15 -; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !17 +; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT1]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit, !prof !15 -; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !18 +; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !16 ; CHECK: [[NEXT2]]: ; CHECK: br i1 %c, label %{{.*}}, label %side_exit.loopexit, !prof !15 -; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !18 +; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !16, !llvm.loop !17 define i32 @basic(ptr %p, i32 %k, i1 %c) #0 !prof !15 { entry: @@ -84,6 +84,7 @@ attributes #1 = { nounwind optsize } ;CHECK: !15 = !{!"branch_weights", i32 1, i32 0} ; These are the weights of the latch and its copies. ;CHECK: !16 = !{!"branch_weights", i32 3001, i32 1001} -;CHECK: !17 = !{!"branch_weights", i32 2000, i32 1001} -;CHECK: !18 = !{!"branch_weights", i32 1001, i32 1001} +;CHECK: !17 = distinct !{!17, !18, !19, {{.*}}} +;CHECK: !18 = !{!"llvm.loop.peeled.count", i32 4} +;CHECK: !19 = !{!"llvm.loop.estimated_trip_count", i32 0} diff --git a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll index 15dce234..dec126f 100644 --- a/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll +++ b/llvm/test/Transforms/LoopUnroll/peel-loop-pgo.ll @@ -5,7 +5,7 @@ ; RUN: opt < %s -S -profile-summary-huge-working-set-size-threshold=9 -debug-only=loop-unroll -passes='require<profile-summary>,function(require<opt-remark-emit>,loop-unroll)' 2>&1 | FileCheck %s --check-prefix=NOPEEL ; REQUIRES: asserts -; Make sure we use the profile information correctly to peel-off 3 iterations +; Make sure we use the profile information correctly to peel off 4 iterations ; from the loop, and update the branch weights for the peeled loop properly.
; CHECK: Loop Unroll: F[basic] @@ -20,11 +20,11 @@ ; CHECK-LABEL: @basic ; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT0]]: -; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !16 +; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT1]]: -; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !17 +; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !15 ; CHECK: [[NEXT2]]: -; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !17 +; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !15, !llvm.loop !16 define void @basic(ptr %p, i32 %k) #0 !prof !15 { entry: @@ -104,6 +104,7 @@ attributes #1 = { nounwind optsize } !16 = !{!"branch_weights", i32 3001, i32 1001} ;CHECK: !15 = !{!"branch_weights", i32 3001, i32 1001} -;CHECK: !16 = !{!"branch_weights", i32 2000, i32 1001} -;CHECK: !17 = !{!"branch_weights", i32 1001, i32 1001} +;CHECK: !16 = distinct !{!16, !17, !18, {{.*}}} +;CHECK: !17 = !{!"llvm.loop.peeled.count", i32 4} +;CHECK: !18 = !{!"llvm.loop.estimated_trip_count", i32 0} diff --git a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll index ec71c67..0a3d201 100644 --- a/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll +++ b/llvm/test/Transforms/LoopUnroll/scev-invalidation-lcssa.ll @@ -3,7 +3,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-LABEL: define i32 @f -; CHECK-SAME: (i1 [[COND1:%.*]]) !prof [[PROF0:![0-9]+]] { +; CHECK-SAME: (i1 [[COND1:%.*]]) {{.*}}{ ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[LOOP1_PEEL_BEGIN:%.*]] ; CHECK: loop1.peel.begin: @@ -19,7 +19,7 @@ define i32 @f(i1 %cond1) #0 !prof !0 { ; CHECK-NEXT: br label [[LOOP1:%.*]] ; CHECK: loop1: ; CHECK-NEXT: [[LD:%.*]] = load i64, ptr null, align 8 -; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF2:![0-9]+]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[COND1]], label [[LOOP1]], label [[EXIT1_LOOPEXIT:%.*]], !prof [[PROF1]], !llvm.loop [[LOOP2:![0-9]+]] ; CHECK: exit1.loopexit: ; CHECK-NEXT: [[LD_LCSSA_PH:%.*]] = phi i64 [ [[LD]], [[LOOP1]] ] ; CHECK-NEXT: br label [[EXIT1]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll index 32fdc5cd6..56a1abd 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fmax-without-fast-math-flags.ll @@ -113,3 +113,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float @test_fmax_and_fmin( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, 
ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll index c3b0bc8..27ca414 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll @@ -86,7 +86,7 @@ define i64 @test_two_ivs(ptr %a, ptr %b, i64 %start) #0 { ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %j.iv = phi i64 [ %start, %entry ], [ %j.iv.next, %for.body ] ; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next> -; CHECK: Cost for VF 16: 48 +; CHECK: Cost for VF 16: 41 ; CHECK: LV: Selecting VF: 16 entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll index 229209e..5ae0839 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll @@ -204,37 +204,33 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 { ; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2 ; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1 ; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64 -; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]] +; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-SVE: vector.ph: -; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; 
CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]] +; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16 ; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-SVE: vector.body: ; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ] +; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]] ; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]] -; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1 -; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> -; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]] -; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]] -; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]] -; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]] -; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] +; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1 +; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> +; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]]) +; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]] +; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]]) +; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-SVE: middle.block: -; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) +; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]]) ; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK-SVE: scalar.ph: @@ -670,39 +666,35 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) # ; 
CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP21]] = add <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -996,36 +988,32 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP17]], [[TMP15]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP18]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -1140,32 +1128,28 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP12:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP11]]
-; CHECK-SVE-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[TMP12]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]])
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP3]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -1277,36 +1261,32 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP18]] = add <vscale x 4 x i32> [[TMP16]], [[TMP17]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP18]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
index dd239c0..8ece59a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
@@ -81,7 +81,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index 49e9989..09b41fb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -12,40 +12,40 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
+; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 4
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
+; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
+; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -62,8 +62,8 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -82,14 +82,14 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
+; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NOI8MM-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
+; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
@@ -123,40 +123,40 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 32
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
+; CHECK-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 4
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
+; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
+; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -173,8 +173,8 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -193,14 +193,14 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP20]])
-; CHECK-NOI8MM-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP21]])
+; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NOI8MM-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
+; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 6e11e55..3a88273 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -12,74 +12,62 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP16]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP20]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP18]], [[TMP9]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]])
-; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP6]]
;
; CHECK-INTERLEAVED-LABEL: define i32 @dotp(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-INTERLEAVED-NEXT: entry:
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP14]], 2
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP20]], i64 [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP20]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = shl nuw i64 [[TMP26]], 2
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP28]], i64 [[TMP27]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i8>, ptr [[TMP28]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP18]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD3]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul <vscale x 4 x i32> [[TMP19]], [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP29]], [[TMP13]]
-; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI]]
-; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP8]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
-; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP11]]
;
; CHECK-MAXBW-LABEL: define i32 @dotp(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -139,78 +127,52 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP9]], 2
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP11]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]])
-; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i64 [[TMP4]]
;
; CHECK-INTERLEAVED-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod(
; CHECK-INTERLEAVED-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-INTERLEAVED-NEXT: entry:
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP9]], 4
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP16]]
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP10]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = shl nuw i64 [[TMP11]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP13]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP17]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 [[TMP18]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i8>, ptr [[TMP19]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD5]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP20]], [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP15]]
-; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[TMP22]], [[VEC_PHI]]
-; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[TMP23]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
-; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP4]]
;
; CHECK-MAXBW-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod(
; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] {
@@ -274,86 +236,66 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP12]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul i64 [[TMP10]], 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP20]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = mul i64 [[TMP10]], 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP8]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP2]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD3]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP15]], [[TMP13]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP17]] = add <vscale x 2 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]])
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP17]])
-; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVE1: scalar.ph:
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVE1: for.exit:
+; CHECK-INTERLEAVE1-NEXT: ret i64 [[TMP4]]
;
; CHECK-INTERLEAVED-LABEL: define i64 @not_dotp_i16_to_i64_has_neon_dotprod(
; CHECK-INTERLEAVED-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1]] {
; CHECK-INTERLEAVED-NEXT: entry:
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP11]]
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP15]], 2
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul i64 [[TMP15]], 2
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP18]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]]
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = shl nuw i64 [[TMP13]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i64 [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i16>, ptr [[TMP30]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD4]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP19]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i64 [[TMP20]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i16>, ptr [[NEXT_GEP3]], align 2
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 2 x i16>, ptr [[TMP21]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD5]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = zext <vscale x 2 x i16> [[WIDE_LOAD6]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP22]], [[TMP16]]
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP23]], [[TMP17]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 2 x i64> [[TMP24]], [[VEC_PHI]]
-; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI1]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP15]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD6]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP1]]
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP2]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP7]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP27]], [[TMP26]]
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
-; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[TMP15]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-INTERLEAVED: scalar.ph:
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE7]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-INTERLEAVED: for.exit:
+; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]]
;
; CHECK-MAXBW-LABEL: define i64 @not_dotp_i16_to_i64_has_neon_dotprod(
; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1]] {
@@ -497,7 +439,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP69]] = add <16 x i32> [[TMP68]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP69]])
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]]
@@ -656,7 +598,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP141:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP141]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP141]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP138]], [[TMP137]]
; CHECK-INTERLEAVED-NEXT: [[TMP142:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -803,7 +745,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP17]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8
@@ -851,7 +793,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nuw i32 [[TMP29]], 8
@@ -952,7 +894,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = add <vscale x 8 x i32> [[TMP16]], [[TMP15]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8
@@ -990,7 +932,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP30]], [[TMP22]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nuw i32 [[TMP27]], 8
@@ -1058,22 +1000,18 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-LABEL: define i32 @dotp_unrolled(
; CHECK-INTERLEAVE1-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-INTERLEAVE1-NEXT: entry:
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP13]], 2
-; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP15]]
+; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 16
; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 4
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]]
+; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 16
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP41:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP35:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1
@@ -1085,38 +1023,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP36:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP2]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = mul nsw <vscale x 4 x i32> [[TMP21]], [[TMP36]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP23]] = add <vscale x 4 x i32> [[TMP38]], [[VEC_PHI3]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i8>, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD5]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD6]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <vscale x 4 x i32> [[TMP25]], [[TMP42]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP30]] = add <vscale x 4 x i32> [[TMP28]], [[VEC_PHI2]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD7:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD7]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP33:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD8]] to <vscale x 4 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <vscale x 4 x i32> [[TMP31]], [[TMP33]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP35]] = add <vscale x 4 x i32> [[TMP34]], [[VEC_PHI1]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD9]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = mul nsw <vscale x 4 x i32> [[TMP37]], [[TMP39]] -; CHECK-INTERLEAVE1-NEXT: [[TMP41]] = add <vscale x 4 x i32> [[TMP40]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP12]], [[TMP23]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]]) +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP41]]) -; 
CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP35]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP30]]) -; CHECK-INTERLEAVE1-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP23]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP24:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE13]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE10]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE7]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP27:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1124,26 +1062,22 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-LABEL: define i32 @dotp_unrolled( ; CHECK-INTERLEAVED-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] { ; CHECK-INTERLEAVED-NEXT: entry: -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = shl nuw nsw i64 [[TMP13]], 3 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP15]] +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], 32 ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP16]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP18]] +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], 32 ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP80:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP81:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP64:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP65:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP49:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP50:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE28:%.*]], [[VECTOR_BODY]] ] +; 
CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE29:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE22:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE23:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = or disjoint i64 [[INDEX]], 1 @@ -1155,90 +1089,74 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 3 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]] -; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = shl nuw i64 [[TMP56]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[TMP20]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP1]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i8>, ptr [[TMP21]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP66:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD8]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = shl nuw i64 [[TMP25]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP72:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 [[TMP26]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 4 x i8>, ptr [[TMP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <vscale x 4 x i8>, ptr [[TMP72]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD9]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP82:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD10]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nsw <vscale x 4 x i32> [[TMP28]], [[TMP66]] -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <vscale x 4 x i32> [[TMP82]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP50]] = add <vscale x 4 x i32> [[TMP30]], [[VEC_PHI6]] -; CHECK-INTERLEAVED-NEXT: [[TMP33]] = add <vscale x 4 x i32> [[TMP31]], [[VEC_PHI7]] -; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = shl nuw i64 [[TMP35]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 [[TMP36]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <vscale x 4 x i8>, ptr [[TMP4]], align 1 -; 
CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 4 x i8>, ptr [[TMP37]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD11]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD12]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = shl nuw i64 [[TMP41]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 [[TMP42]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <vscale x 4 x i8>, ptr [[TMP5]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <vscale x 4 x i8>, ptr [[TMP43]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD13]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD14]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = mul nsw <vscale x 4 x i32> [[TMP38]], [[TMP44]] -; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = mul nsw <vscale x 4 x i32> [[TMP39]], [[TMP45]] -; CHECK-INTERLEAVED-NEXT: [[TMP48]] = add <vscale x 4 x i32> [[TMP46]], [[VEC_PHI4]] -; CHECK-INTERLEAVED-NEXT: [[TMP49]] = add <vscale x 4 x i32> [[TMP47]], [[VEC_PHI5]] -; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP52:%.*]] = shl nuw i64 [[TMP51]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[TMP52]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD16:%.*]] = load <vscale x 4 x i8>, ptr [[TMP53]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP54:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD15]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP55:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD16]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP57:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP58:%.*]] = shl nuw i64 [[TMP57]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP59:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[TMP58]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD17:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <vscale x 4 x i8>, ptr [[TMP59]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP60:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD17]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP61:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD18]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP62:%.*]] = mul nsw <vscale x 4 x i32> [[TMP54]], [[TMP60]] -; CHECK-INTERLEAVED-NEXT: [[TMP63:%.*]] = mul nsw <vscale x 4 x i32> [[TMP55]], [[TMP61]] -; CHECK-INTERLEAVED-NEXT: [[TMP64]] = add <vscale x 4 x i32> [[TMP62]], [[VEC_PHI2]] -; CHECK-INTERLEAVED-NEXT: [[TMP65]] = add <vscale x 4 x i32> [[TMP63]], [[VEC_PHI3]] -; CHECK-INTERLEAVED-NEXT: [[TMP67:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP68:%.*]] = shl nuw i64 [[TMP67]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP69:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i64 [[TMP68]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <vscale x 4 x i8>, ptr [[TMP69]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP70:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD19]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP71:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD20]] to <vscale x 4 x i32> 
-; CHECK-INTERLEAVED-NEXT: [[TMP73:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP74:%.*]] = shl nuw i64 [[TMP73]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP75:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 [[TMP74]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD22:%.*]] = load <vscale x 4 x i8>, ptr [[TMP75]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP76:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD21]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP77:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD22]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP78:%.*]] = mul nsw <vscale x 4 x i32> [[TMP70]], [[TMP76]] -; CHECK-INTERLEAVED-NEXT: [[TMP79:%.*]] = mul nsw <vscale x 4 x i32> [[TMP71]], [[TMP77]] -; CHECK-INTERLEAVED-NEXT: [[TMP80]] = add <vscale x 4 x i32> [[TMP78]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP81]] = add <vscale x 4 x i32> [[TMP79]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]] +; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP12]] +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP13]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP17]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]]) +; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP23]] +; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP24]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> 
@llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP25]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]]) +; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP30]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP31]] +; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP48]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP33]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]]) +; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = mul nsw <16 x i32> [[TMP36]], [[TMP39]] +; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP40]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP41]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP81]], [[TMP80]] -; CHECK-INTERLEAVED-NEXT: [[TMP83:%.*]] = call i32 
@llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX23:%.*]] = add <vscale x 4 x i32> [[TMP65]], [[TMP64]] -; CHECK-INTERLEAVED-NEXT: [[TMP84:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX23]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX24:%.*]] = add <vscale x 4 x i32> [[TMP49]], [[TMP48]] -; CHECK-INTERLEAVED-NEXT: [[TMP85:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX24]]) -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX25:%.*]] = add <vscale x 4 x i32> [[TMP33]], [[TMP50]] -; CHECK-INTERLEAVED-NEXT: [[TMP86:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX25]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE29]], [[PARTIAL_REDUCE28]] +; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX30:%.*]] = add <4 x i32> [[PARTIAL_REDUCE23]], [[PARTIAL_REDUCE22]] +; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX30]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX31:%.*]] = add <4 x i32> [[PARTIAL_REDUCE17]], [[PARTIAL_REDUCE16]] +; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX31]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX32:%.*]] = add <4 x i32> [[PARTIAL_REDUCE11]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX32]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -1396,7 +1314,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVE1-NEXT: br label [[EXIT:%.*]] @@ -1434,7 +1352,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = xor i1 [[TMP20]], true -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]]) ; CHECK-INTERLEAVED-NEXT: br label [[EXIT:%.*]] @@ -1525,7 +1443,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP14]] = add <vscale x 4 x i32> [[TMP13]], 
[[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]] ; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP14]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1572,7 +1490,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 4 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]] ; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP24]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) @@ -1607,7 +1525,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]]) ; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32() @@ -1666,7 +1584,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1713,7 +1631,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK-INTERLEAVED: 
middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]] ; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) @@ -1748,7 +1666,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 { ; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]] @@ -1866,7 +1784,7 @@ define void @not_dotp_not_phi2(ptr %matrix, i32 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP23]] = add i32 [[TMP21]], [[TMP15]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add i32 [[TMP23]], [[TMP22]] ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -1978,7 +1896,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2016,7 +1934,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2053,7 +1971,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br 
i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2111,7 +2029,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVE1-NEXT: [[TMP5]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2149,7 +2067,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-INTERLEAVED-NEXT: [[TMP9]] = add <8 x i64> [[TMP7]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP9]], [[TMP8]] ; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) @@ -2186,7 +2104,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 { ; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] @@ -2226,36 +2144,32 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVE1: for.body.preheader: ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1 -; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-INTERLEAVE1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 ; CHECK-INTERLEAVE1-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: 
[[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 ; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP1]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP16]], [[TMP14]] -; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = add <vscale x 2 x i64> [[TMP17]], [[VEC_PHI]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP18]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -2267,50 +2181,32 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 
{ ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]] ; CHECK-INTERLEAVED: for.body.preheader: ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 -; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16 ; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16 ; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = insertelement <2 x i64> zeroinitializer, i64 [[COST]], i32 0 ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = shl nuw i64 [[TMP14]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[TMP15]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP16]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = shl nuw i64 [[TMP20]], 1 -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 [[TMP21]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[NEXT_GEP2]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 2 x i8>, ptr [[TMP22]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD5]] to <vscale x 2 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nuw nsw <vscale x 2 x 
i64> [[TMP23]], [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP24]], [[TMP18]] -; CHECK-INTERLEAVED-NEXT: [[TMP27]] = add <vscale x 2 x i64> [[TMP25]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP28]] = add <vscale x 2 x i64> [[TMP26]], [[VEC_PHI1]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP28]], [[TMP27]] -; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: @@ -2349,7 +2245,7 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 { ; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]] ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]] ; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]]) ; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] @@ -2471,7 +2367,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: ; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]]) ; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]]) 
@@ -2571,7 +2467,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: ; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) @@ -2671,7 +2567,7 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum, ; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]]) ; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK-MAXBW: middle.block: ; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]]) ; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll index 11ff688..7bb4715 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll @@ -12,77 +12,65 @@ define i32 @dotp(ptr %a, ptr %b) #0 { ; CHECK-INTERLEAVE1-NEXT: entry: ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVE1: vector.ph: -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4 -; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], 
i64 [[INDEX]] -; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP10]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul <vscale x 4 x i32> [[TMP12]], [[TMP9]] -; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP13]] -; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP14]] -; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1 +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32> +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]] +; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]]) +; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP15]]) -; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVE1: scalar.ph: +; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) +; CHECK-INTERLEAVE1-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVE1: for.exit: +; CHECK-INTERLEAVE1-NEXT: ret i32 [[TMP8]] ; ; CHECK-INTERLEAVED-LABEL: define i32 @dotp( ; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { ; CHECK-INTERLEAVED-NEXT: entry: ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK-INTERLEAVED: vector.ph: -; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8 -; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]] ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: 
[[TMP10:%.*]] = shl nuw i64 [[TMP9]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP7]], i64 [[TMP10]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP11]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]] -; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = shl nuw i64 [[TMP16]], 2 -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP14]], i64 [[TMP17]] -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i8>, ptr [[TMP14]], align 1 -; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i8>, ptr [[TMP18]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD3]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD4]] to <vscale x 4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i32> [[TMP19]], [[TMP12]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul <vscale x 4 x i32> [[TMP20]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP21]] -; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[TMP22]] -; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP23]] -; CHECK-INTERLEAVED-NEXT: [[TMP26]] = add <vscale x 4 x i32> [[VEC_PHI1]], [[TMP24]] -; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1 +; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1 +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32> +; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]] +; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP15]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP8]] +; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP9]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]]) +; CHECK-INTERLEAVED-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 +; CHECK-INTERLEAVED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[TMP26]], [[TMP25]] -; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]]) -; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]] -; CHECK-INTERLEAVED: scalar.ph: +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]] +; CHECK-INTERLEAVED: for.exit: +; CHECK-INTERLEAVED-NEXT: ret i32 [[TMP13]] ; ; CHECK-MAXBW-LABEL: define i32 @dotp( ; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll index db3166c..3c2ae1c7 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll @@ -17,16 +17,16 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP3]] = add <16 x i32> [[TMP2]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -38,22 +38,22 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[TMP6:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP5]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[TMP4]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP6]], [[TMP5]] -; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -199,16 +199,16 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i64> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x 
i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -220,22 +220,22 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i64> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i64> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i64> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -293,16 +293,16 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <8 x i64> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: 
[[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -314,22 +314,22 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[TMP1]], i32 8 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <8 x i64> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <8 x i64> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <8 x i64> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -764,16 +764,16 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: 
[[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> -; CHECK-INTERLEAVE1-NEXT: [[TMP4]] = add <16 x i32> [[TMP3]], [[VEC_PHI]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP4]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVE1: scalar.ph: ; @@ -785,22 +785,22 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 { ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]] ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1 ; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP6]] = add <16 x i32> [[TMP4]], [[VEC_PHI]] -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[TMP5]], [[VEC_PHI1]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]]) ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP7]], [[TMP6]] -; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i32 
@llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE3]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]] ; CHECK-INTERLEAVED: scalar.ph: ; @@ -984,21 +984,21 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 ; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer ; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 ; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> ; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]] ; CHECK-INTERLEAVE1: vector.body: ; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] +; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ] ; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]] ; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 -; CHECK-INTERLEAVE1-NEXT: [[TMP6]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] +; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 ; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVE1: middle.block: -; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP6]]) +; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]]) ; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP0]], [[N_VEC]] ; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVE1: scalar.ph: @@ -1015,26 +1015,26 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 { ; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0 ; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]] -; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = insertelement <16 x i32> zeroinitializer, i32 [[A]], i32 0 +; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0 ; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32> ; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK-INTERLEAVED: vector.body: ; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] 
], [ [[TMP22:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ [[TMP12]], [[VECTOR_PH]] ], [ [[TMP7:%.*]], [[VECTOR_BODY]] ] -; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <16 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP8:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ] +; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ] ; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]] ; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]] ; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16 ; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1 ; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1 -; CHECK-INTERLEAVED-NEXT: [[TMP7]] = add <16 x i32> [[VEC_PHI]], [[TMP3]] -; CHECK-INTERLEAVED-NEXT: [[TMP8]] = add <16 x i32> [[VEC_PHI2]], [[TMP3]] +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]]) +; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP3]]) ; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32 ; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK-INTERLEAVED: middle.block: -; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP8]], [[TMP7]] -; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]]) +; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[PARTIAL_REDUCE2]], [[PARTIAL_REDUCE]] +; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]] ; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK-INTERLEAVED: scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll index c61361b..25ee100 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll @@ -192,7 +192,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) { ; CHECK: LV(REG): VF = 16 ; CHECK-NEXT: LV(REG): Found max usage: 2 item ; CHECK-NEXT: LV(REG): RegisterClass: Generic::ScalarRC, 9 registers -; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 24 registers +; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 12 registers ; CHECK-NEXT: LV(REG): Found invariant usage: 1 item entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll new file mode 100644 index 0000000..e97d6e66d --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-tail-folding.ll @@ -0,0 +1,244 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 +; RUN: opt -passes=loop-vectorize -force-vector-width=2 
-force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC1 %s +; RUN: opt -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF2IC2 %s +; RUN: opt -passes=loop-vectorize -force-vector-width=1 -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S %s | FileCheck --check-prefix=VF1IC2 %s + +define i32 @FOR_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC1-NEXT: ret i32 [[TMP32]] +; +; VF2IC2-LABEL: define i32 @FOR_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC2-NEXT: ret i32 [[TMP66]] +; +; VF1IC2-LABEL: define i32 @FOR_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] 
+; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF1IC2-NEXT: ret i32 [[TMP30]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + ret i32 %for +} + +define i32 @FOR_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_next_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP28:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] +; VF2IC1-NEXT: ret i32 [[TMP28]] +; +; VF2IC2-LABEL: define i32 @FOR_next_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP62:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] +; VF2IC2-NEXT: ret i32 [[TMP62]] +; +; VF1IC2-LABEL: define i32 @FOR_next_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: 
[[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP27:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] +; VF1IC2-NEXT: ret i32 [[TMP27]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + ret i32 %for.next +} + +define i32 @FOR_and_next_used_outside(ptr noalias %A, ptr noalias %B, i64 %n) { +; VF2IC1-LABEL: define i32 @FOR_and_next_used_outside( +; VF2IC1-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC1-NEXT: [[ENTRY:.*]]: +; VF2IC1-NEXT: br label %[[LOOP:.*]] +; VF2IC1: [[LOOP]]: +; VF2IC1-NEXT: [[TMP1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP10:%.*]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP1]] +; VF2IC1-NEXT: [[TMP10]] = load i32, ptr [[TMP9]], align 4 +; VF2IC1-NEXT: [[TMP23:%.*]] = add nsw i32 [[FOR]], [[TMP10]] +; VF2IC1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP1]] +; VF2IC1-NEXT: store i32 [[TMP23]], ptr [[TMP20]], align 4 +; VF2IC1-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP1]], 1 +; VF2IC1-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC1-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC1: [[FOR_END]]: +; VF2IC1-NEXT: [[TMP32:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC1-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP10]], %[[LOOP]] ] +; VF2IC1-NEXT: [[RES:%.*]] = add i32 [[TMP32]], [[TMP33]] +; VF2IC1-NEXT: ret i32 [[RES]] +; +; VF2IC2-LABEL: define i32 @FOR_and_next_used_outside( +; VF2IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) { +; VF2IC2-NEXT: [[ENTRY:.*]]: +; VF2IC2-NEXT: br label %[[LOOP:.*]] +; VF2IC2: [[LOOP]]: +; VF2IC2-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP23:%.*]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP3]] +; VF2IC2-NEXT: [[TMP23]] = load i32, ptr [[TMP22]], align 4 +; VF2IC2-NEXT: [[TMP47:%.*]] = add nsw i32 [[FOR]], [[TMP23]] +; VF2IC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP3]] +; VF2IC2-NEXT: store i32 [[TMP47]], ptr [[TMP44]], align 4 +; VF2IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP3]], 1 +; VF2IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF2IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF2IC2: [[FOR_END]]: +; VF2IC2-NEXT: [[TMP66:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF2IC2-NEXT: [[TMP71:%.*]] = phi i32 [ [[TMP23]], %[[LOOP]] ] +; VF2IC2-NEXT: [[RES:%.*]] = add i32 [[TMP66]], [[TMP71]] +; VF2IC2-NEXT: ret i32 [[RES]] +; +; VF1IC2-LABEL: define i32 @FOR_and_next_used_outside( +; VF1IC2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 
[[N:%.*]]) { +; VF1IC2-NEXT: [[ENTRY:.*]]: +; VF1IC2-NEXT: br label %[[LOOP:.*]] +; VF1IC2: [[LOOP]]: +; VF1IC2-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[FOR:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP7:%.*]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP0]] +; VF1IC2-NEXT: [[TMP7]] = load i32, ptr [[TMP6]], align 4 +; VF1IC2-NEXT: [[TMP12:%.*]] = add nsw i32 [[FOR]], [[TMP7]] +; VF1IC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[TMP0]] +; VF1IC2-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4 +; VF1IC2-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP0]], 1 +; VF1IC2-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; VF1IC2-NEXT: br i1 [[EC]], label %[[FOR_END:.*]], label %[[LOOP]] +; VF1IC2: [[FOR_END]]: +; VF1IC2-NEXT: [[TMP30:%.*]] = phi i32 [ [[FOR]], %[[LOOP]] ] +; VF1IC2-NEXT: [[TMP33:%.*]] = phi i32 [ [[TMP7]], %[[LOOP]] ] +; VF1IC2-NEXT: [[RES:%.*]] = add i32 [[TMP30]], [[TMP33]] +; VF1IC2-NEXT: ret i32 [[RES]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %for = phi i32 [ 33, %entry ], [ %for.next, %loop ] + %gep.A = getelementptr inbounds nuw i32, ptr %A, i64 %iv + %for.next = load i32, ptr %gep.A, align 4 + %add = add nsw i32 %for, %for.next + %gep.B = getelementptr inbounds nuw i32, ptr %B, i64 %iv + store i32 %add, ptr %gep.B, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %for.end, label %loop + +for.end: + %res = add i32 %for, %for.next + ret i32 %res +} + + diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll index 616f156..5b7c27a 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags-interleave.ll @@ -113,3 +113,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmin(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float @test_fmax_and_fmin( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], 
[[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub +} diff --git a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll index 1a2b233..8b6a6e1 100644 --- a/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/fmax-without-fast-math-flags.ll @@ -683,3 +683,49 @@ loop: exit: ret float %max.next } + +define float @test_fmax_and_fmax(ptr %src.0, ptr %src.1, i64 %n) { +; CHECK-LABEL: define float @test_fmax_and_fmax( +; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MIN_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[MAX:%.*]] = phi float [ 0.000000e+00, %[[ENTRY]] ], [ [[MAX_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC_0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_0]], i64 [[IV]] +; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC_1]], i64 [[IV]] +; CHECK-NEXT: [[L_0:%.*]] = load float, ptr [[GEP_SRC_0]], align 4 +; CHECK-NEXT: [[L_1:%.*]] = load float, ptr [[GEP_SRC_1]], align 4 +; CHECK-NEXT: [[MAX_NEXT]] = tail call noundef float @llvm.maxnum.f32(float [[MAX]], float [[L_0]]) +; CHECK-NEXT: [[MIN_NEXT]] = tail call noundef float @llvm.minnum.f32(float [[MIN]], float [[L_1]]) +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[MAX_NEXT_LCSSA:%.*]] = phi float [ [[MAX_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[MIN_NEXT_LCSSA:%.*]] = phi float [ [[MIN_NEXT]], %[[LOOP]] ] +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[MAX_NEXT_LCSSA]], [[MIN_NEXT_LCSSA]] +; CHECK-NEXT: ret float [[SUB]] +; +entry: + br label %loop + +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %min = phi float [ 0.000000e+00, %entry ], [ %min.next, %loop ] + %max = phi float [ 0.000000e+00, %entry ], [ %max.next, %loop ] + %gep.src.0 = getelementptr inbounds nuw float, ptr %src.0, i64 %iv + %gep.src.1 = getelementptr inbounds nuw float, ptr %src.1, i64 %iv + %l.0 = load float, ptr %gep.src.0, align 4 + %l.1 = load float, ptr %gep.src.1, align 4 + %max.next = tail call noundef float @llvm.maxnum.f32(float %max, float %l.0) + %min.next = tail call noundef float @llvm.minnum.f32(float %min, float %l.1) + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, %n + br i1 %ec, label %exit, label %loop + +exit: + %sub = fsub float %max.next, %min.next + ret float %sub 
+} diff --git a/llvm/test/Transforms/LoopVectorize/reduction-order.ll b/llvm/test/Transforms/LoopVectorize/reduction-order.ll index b07c3833..b51db48 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-order.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-order.ll @@ -1,63 +1,93 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 ; RUN: opt -passes='loop-vectorize' -force-vector-width=4 -force-vector-interleave=1 -S < %s 2>&1 | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" ; Make sure the selects generated from reduction are always emitted ; in deterministic order. -; CHECK-LABEL: @foo( -; CHECK: vector.body: -; CHECK: [[VEC_PHI_1:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_5:%.+]], %vector.body ] -; CHECK: [[VEC_PHI_2:%.+]] = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ [[ADD_3:%.+]], %vector.body ] -; CHECK: icmp ule <4 x i64> -; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]] -; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5) -; CHECK: select <4 x i1> {{.*}}, <4 x i32> [[ADD_5]], <4 x i32> -; CHECK-NEXT: select <4 x i1> {{.*}}, <4 x i32> [[ADD_3]], <4 x i32> -; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body ; -define internal i64 @foo(ptr %t0) !prof !1 { -t16: - br label %t20 - -t17: ; preds = %t20 - %t18 = phi i32 [ %t24, %t20 ] - %t19 = phi i32 [ %t28, %t20 ] - br label %t31 +define i32 @foo() !prof !1 { +; CHECK-LABEL: define i32 @foo() {{.*}}{ +; CHECK-NEXT: [[T16:.*:]] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI_1:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_5:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI_2:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[ADD_3:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3> +; CHECK-NEXT: [[TMP0:%.*]] = icmp ule <4 x i64> [[VEC_IV]], splat (i64 9) +; CHECK-NEXT: [[ADD_3]] = add <4 x i32> splat (i32 3), [[VEC_PHI_2]] +; CHECK-NEXT: [[ADD_5]] = add <4 x i32> [[VEC_PHI_1]], splat (i32 5) +; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[ADD_5]], <4 x i32> [[VEC_PHI_1]] +; CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[ADD_3]], <4 x i32> [[VEC_PHI_2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12 +; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !prof [[PROF1:![0-9]+]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]]) +; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP7]], [[TMP6]] +; CHECK-NEXT: ret i32 [[ADD]] +; +entry: + br label %loop -t20: ; preds = %t20, %t16 - %t21 = phi i64 [ 0, %t16 ], [ %t29, %t20 ] 
- %t22 = phi i32 [ 0, %t16 ], [ %t28, %t20 ] - %t23 = phi i32 [ 0, %t16 ], [ %t24, %t20 ] - %t24 = add i32 3, %t23 - %t28 = add i32 %t22, 5 - %t29 = add nuw nsw i64 %t21, 1 - %t30 = icmp eq i64 %t29, 10 - br i1 %t30, label %t17, label %t20, !prof !2 +loop: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] + %red.1 = phi i32 [ 0, %entry ], [ %red.1.next, %loop ] + %red.2 = phi i32 [ 0, %entry ], [ %red.2.next, %loop ] + %red.2.next = add i32 3, %red.2 + %red.1.next = add i32 %red.1, 5 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 10 + br i1 %ec, label %exit, label %loop, !prof !2 -t31: - ret i64 undef +exit: + %r.2 = phi i32 [ %red.2.next, %loop ] + %r.1 = phi i32 [ %red.1.next, %loop ] + %add = add i32 %r.2, %r.1 + ret i32 %add } ; Make sure we do not fail when checking for ordered reduction. This test just ; exercises the path and bails out without performing vectorization. -; CHECK-LABEL: quux -; CHECK-NOT: fadd <4 x -define void @quux(i1 %arg) { -bb: +define double @quux(i1 %arg) { +; CHECK-LABEL: define double @quux( +; CHECK-SAME: i1 [[ARG:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[HEADER:.*]] +; CHECK: [[HEADER]]: +; CHECK-NEXT: [[TMP5:%.*]] = phi double [ 1.300000e+01, %[[ENTRY]] ], [ [[TMP:%.*]], %[[LATCH:.*]] ] +; CHECK-NEXT: [[TMP6:%.*]] = fadd double [[TMP5]], 1.000000e+00 +; CHECK-NEXT: br label %[[LATCH]] +; CHECK: [[LATCH]]: +; CHECK-NEXT: [[TMP]] = phi double [ [[TMP6]], %[[HEADER]] ] +; CHECK-NEXT: br i1 [[ARG]], label %[[HEADER]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: [[R:%.*]] = phi double [ [[TMP]], %[[LATCH]] ] +; CHECK-NEXT: ret double [[R]] +; +entry: br label %header -latch: ; preds = %header - %tmp = phi double [ %tmp6, %header ] - br i1 %arg, label %header, label %bb2 - -bb2: ; preds = %latch - %tmp3 = phi double [ %tmp, %latch ] - ret void - -header: ; preds = %latch, %bb - %tmp5 = phi double [ 1.300000e+01, %bb ], [ %tmp, %latch ] +header: + %tmp5 = phi double [ 1.300000e+01, %entry ], [ %tmp, %latch ] %tmp6 = fadd double %tmp5, 1.000000e+00 br label %latch + +latch: + %tmp = phi double [ %tmp6, %header ] + br i1 %arg, label %header, label %exit + +exit: + %r = phi double [ %tmp, %latch ] + ret double %r } !1 = !{!"function_entry_count", i64 801} diff --git a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll index 644900d..f794620 100644 --- a/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll +++ b/llvm/test/Transforms/LoopVectorize/single-early-exit-deref-assumptions.ll @@ -500,3 +500,50 @@ exit: %first.addr.0.lcssa.i = phi ptr [ %first, %entry ], [ %iv, %loop.header ], [ %iv.next, %loop.latch ] ret ptr %first.addr.0.lcssa.i } + +define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context(ptr noalias %p1, ptr noalias %p2) nosync { +; CHECK-LABEL: define i64 @early_exit_alignment_and_deref_known_via_assumption_with_constant_size_nofree_via_context( +; CHECK-SAME: ptr noalias [[P1:%.*]], ptr noalias [[P2:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P1]], i64 4), "dereferenceable"(ptr [[P1]], i64 1024) ] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i64 4), "dereferenceable"(ptr [[P2]], i64 1024) ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], %[[LOOP_INC:.*]] ], [ 0, 
%[[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[P1]], i64 [[INDEX1]] +; CHECK-NEXT: [[LD1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 1 +; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[P2]], i64 [[INDEX1]] +; CHECK-NEXT: [[LD2:%.*]] = load i8, ptr [[TMP1]], align 1 +; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i8 [[LD1]], [[LD2]] +; CHECK-NEXT: br i1 [[CMP3]], label %[[LOOP_INC]], label %[[LOOP_END:.*]] +; CHECK: [[LOOP_INC]]: +; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDEX_NEXT]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[LOOP_END]] +; CHECK: [[LOOP_END]]: +; CHECK-NEXT: [[RETVAL:%.*]] = phi i64 [ [[INDEX1]], %[[LOOP]] ], [ -1, %[[LOOP_INC]] ] +; CHECK-NEXT: ret i64 [[RETVAL]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %p1, i64 4), "dereferenceable"(ptr %p1, i64 1024) ] + call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 4), "dereferenceable"(ptr %p2, i64 1024) ] + br label %loop + +loop: + %index = phi i64 [ %index.next, %loop.inc ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i8, ptr %p1, i64 %index + %ld1 = load i8, ptr %arrayidx, align 1 + %arrayidx1 = getelementptr inbounds i8, ptr %p2, i64 %index + %ld2 = load i8, ptr %arrayidx1, align 1 + %cmp3 = icmp eq i8 %ld1, %ld2 + br i1 %cmp3, label %loop.inc, label %loop.end + +loop.inc: + %index.next = add i64 %index, 1 + %exitcond = icmp ne i64 %index.next, 1024 + br i1 %exitcond, label %loop, label %loop.end + +loop.end: + %retval = phi i64 [ %index, %loop ], [ -1, %loop.inc ] + ret i64 %retval +} diff --git a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s index 3e96ea3..13f20bf 100644 --- a/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s +++ b/llvm/test/tools/llvm-objdump/ELF/AMDGPU/kd-gfx1250.s @@ -20,7 +20,7 @@ ; CHECK-NEXT: ; IMAGE_OP 0 ; CHECK-NEXT: .amdhsa_next_free_vgpr 32 ; CHECK-NEXT: .amdhsa_reserve_vcc 0 -; CHECK-NEXT: .amdhsa_reserve_xnack_mask 0 +; CHECK-NEXT: .amdhsa_reserve_xnack_mask 1 ; CHECK-NEXT: .amdhsa_next_free_sgpr 8 ; CHECK-NEXT: .amdhsa_float_round_mode_32 0 ; CHECK-NEXT: .amdhsa_float_round_mode_16_64 0 @@ -76,7 +76,7 @@ ; CHECK-NEXT: ; IMAGE_OP 0 ; CHECK-NEXT: .amdhsa_next_free_vgpr 32 ; CHECK-NEXT: .amdhsa_reserve_vcc 0 -; CHECK-NEXT: .amdhsa_reserve_xnack_mask 0 +; CHECK-NEXT: .amdhsa_reserve_xnack_mask 1 ; CHECK-NEXT: .amdhsa_next_free_sgpr 8 ; CHECK-NEXT: .amdhsa_float_round_mode_32 0 ; CHECK-NEXT: .amdhsa_float_round_mode_16_64 0 diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp index e09ddb4..731d648 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp @@ -1636,7 +1636,11 @@ static std::pair<Triple, SubtargetFeatures> getFirstFileTripleAndFeatures() { case file_magic::macho_object: { auto Obj = ExitOnErr( object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef())); - Triple TT = Obj->makeTriple(); + Triple TT; + if (auto *MachOObj = dyn_cast<object::MachOObjectFile>(Obj.get())) + TT = MachOObj->getArchTriple(); + else + TT = Obj->makeTriple(); if (Magic == file_magic::coff_object) { // TODO: Move this to makeTriple() if possible. 
TT.setObjectFormat(Triple::COFF); diff --git a/llvm/unittests/ADT/APFloatTest.cpp b/llvm/unittests/ADT/APFloatTest.cpp index 141282e..30f0a8e5 100644 --- a/llvm/unittests/ADT/APFloatTest.cpp +++ b/llvm/unittests/ADT/APFloatTest.cpp @@ -10176,4 +10176,11 @@ TEST(APFloatTest, hasSignBitInMSB) { EXPECT_FALSE(APFloat::hasSignBitInMSB(APFloat::Float8E8M0FNU())); } +TEST(APFloatTest, FrexpQuietSNaN) { + APFloat SNaN = APFloat::getSNaN(APFloat::PPCDoubleDouble()); + int Exp; + APFloat Result = frexp(SNaN, Exp, APFloat::rmNearestTiesToEven); + EXPECT_FALSE(Result.isSignaling()); +} + } // namespace diff --git a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp index d1c0f64..d8457a3 100644 --- a/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp +++ b/llvm/unittests/Analysis/MemoryProfileInfoTest.cpp @@ -230,8 +230,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -280,8 +279,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -335,8 +333,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -395,8 +392,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) CallBase *Call = findCall(*Func, "call"); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); @@ -467,8 +463,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) ASSERT_NE(Call, nullptr); Trie.buildAndAttachMIBMetadata(Call); - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals)); @@ -541,8 +536,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) // Restore original option value. 
MemProfKeepAllNotColdContexts = OrigMemProfKeepAllNotColdContexts; - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MDNode *MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); EXPECT_THAT(MemProfMD, MemprofMetadataEquals(ExpectedVals)); @@ -670,8 +664,7 @@ declare dso_local noalias noundef i8* @malloc(i64 noundef) // The hot allocations will be converted to NotCold and pruned as they // are unnecessary to determine how to clone the cold allocation. - EXPECT_TRUE(Call->hasFnAttr("memprof")); - EXPECT_EQ(Call->getFnAttr("memprof").getValueAsString(), "ambiguous"); + EXPECT_FALSE(Call->hasFnAttr("memprof")); EXPECT_TRUE(Call->hasMetadata(LLVMContext::MD_memprof)); MemProfMD = Call->getMetadata(LLVMContext::MD_memprof); ASSERT_EQ(MemProfMD->getNumOperands(), 2u); diff --git a/llvm/unittests/Frontend/CMakeLists.txt b/llvm/unittests/Frontend/CMakeLists.txt index 836a844..1ce34e7 100644 --- a/llvm/unittests/Frontend/CMakeLists.txt +++ b/llvm/unittests/Frontend/CMakeLists.txt @@ -1,5 +1,6 @@ set(LLVM_LINK_COMPONENTS Analysis + BinaryFormat Core FrontendHLSL FrontendOffloading diff --git a/llvm/unittests/Object/BuildIDTest.cpp b/llvm/unittests/Object/BuildIDTest.cpp new file mode 100644 index 0000000..04ca636 --- /dev/null +++ b/llvm/unittests/Object/BuildIDTest.cpp @@ -0,0 +1,120 @@ +//===- BuildIDTest.cpp - Tests for getBuildID ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "llvm/Object/BuildID.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Object/ELFObjectFile.h" +#include "llvm/ObjectYAML/yaml2obj.h" +#include "llvm/Support/YAMLTraits.h" +#include "llvm/Testing/Support/Error.h" + +#include "gtest/gtest.h" + +using namespace llvm; +using namespace llvm::object; + +template <class ELFT> +static Expected<ELFObjectFile<ELFT>> toBinary(SmallVectorImpl<char> &Storage, + StringRef Yaml) { + raw_svector_ostream OS(Storage); + yaml::Input YIn(Yaml); + if (!yaml::convertYAML(YIn, OS, [](const Twine &Msg) {})) + return createStringError(std::errc::invalid_argument, + "unable to convert YAML"); + return ELFObjectFile<ELFT>::create(MemoryBufferRef(OS.str(), "dummyELF")); +} + +static StringRef getInvalidNoteELF(bool WithShdr) { + static std::string WithSection(R"( +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC + Machine: EM_X86_64 +ProgramHeaders: + - Type: PT_NOTE + FileSize: 0x1a + FirstSec: .note.gnu.build-id + LastSec: .note.gnu.build-id +Sections: + - Name: .note.gnu.build-id + Type: SHT_NOTE + AddressAlign: 0x04 + Notes: + - Name: "GNU" + Desc: "abb50d82b6bdc861" + Type: 3 +)"); + static std::string WithoutSection(WithSection + R"( + - Type: SectionHeaderTable + NoHeaders: true +)"); + if (WithShdr) + return WithSection; + return WithoutSection; +} + +// The BuildID can be looked up from a section header, if there is no program +// header. 
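+// In the YAML above, the PT_NOTE header's FileSize (0x1a) is presumably too
+// small to cover the note payload, so the program-header scan fails and
+// getBuildID falls back to the .note.gnu.build-id section header.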
+TEST(BuildIDTest, InvalidPhdrFileSizeWithShdrs) {
+  SmallString<0> Storage;
+  Expected<ELFObjectFile<ELF64LE>> ElfOrErr =
+      toBinary<ELF64LE>(Storage, getInvalidNoteELF(true));
+  ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded());
+  BuildIDRef BuildID = getBuildID(&ElfOrErr.get());
+  EXPECT_EQ(
+      StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()),
+      "\xAB\xB5\x0D\x82\xB6\xBD\xC8\x61");
+}
+
+// The code handles a malformed program header that points at data outside the
+// file.
+TEST(BuildIDTest, InvalidPhdrFileSizeNoShdrs) {
+  SmallString<0> Storage;
+  Expected<ELFObjectFile<ELF64LE>> ElfOrErr =
+      toBinary<ELF64LE>(Storage, getInvalidNoteELF(false));
+  ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded());
+  BuildIDRef BuildID = getBuildID(&ElfOrErr.get());
+  EXPECT_EQ(
+      StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()),
+      "");
+}
+
+// The code handles a malformed section header that points at data outside the
+// file.
+TEST(BuildIDTest, InvalidSectionHeader) {
+  SmallString<0> Storage;
+  Expected<ELFObjectFile<ELF64LE>> ElfOrErr = toBinary<ELF64LE>(Storage, R"(
+--- !ELF
+FileHeader:
+  Class: ELFCLASS64
+  Data: ELFDATA2LSB
+  Type: ET_EXEC
+  Machine: EM_X86_64
+ProgramHeaders:
+  - Type: PT_NOTE
+    FirstSec: .note.gnu.build-id
+    LastSec: .note.gnu.build-id
+Sections:
+  - Name: .note.gnu.build-id
+    Type: SHT_NOTE
+    AddressAlign: 0x04
+    ShOffset: 0x1a1
+    Notes:
+      - Name: "GNU"
+        Desc: "abb50d82b6bdc861"
+        Type: 3
+)");
+  ASSERT_THAT_EXPECTED(ElfOrErr, Succeeded());
+  BuildIDRef BuildID = getBuildID(&ElfOrErr.get());
+  EXPECT_EQ(
+      StringRef(reinterpret_cast<const char *>(BuildID.data()), BuildID.size()),
+      "\xAB\xB5\x0D\x82\xB6\xBD\xC8\x61");
+}
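Taken together, the three tests encode the lookup contract of getBuildID: read the GNU build ID note via PT_NOTE program headers first, fall back to SHT_NOTE sections when the program header is unusable, and return an empty ID when neither source can be parsed. A hedged sketch of how a caller might consume that contract; printBuildID is a hypothetical helper, not part of this patch:

    #include "llvm/Object/BuildID.h"
    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::object;

    // Print the build ID of an already-loaded object, if one was recovered.
    static void printBuildID(const ObjectFile &Obj) {
      BuildIDRef ID = getBuildID(&Obj);
      if (ID.empty()) {
        // Matches the InvalidPhdrFileSizeNoShdrs expectation: no usable note.
        errs() << "no build ID\n";
        return;
      }
      for (uint8_t Byte : ID)
        outs() << format_hex_no_prefix(Byte, 2);
      outs() << "\n";
    }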
diff --git a/llvm/unittests/Object/CMakeLists.txt b/llvm/unittests/Object/CMakeLists.txt
index 1343352..cd70a7b 100644
--- a/llvm/unittests/Object/CMakeLists.txt
+++ b/llvm/unittests/Object/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LLVM_LINK_COMPONENTS
 
 add_llvm_unittest(ObjectTests
   ArchiveTest.cpp
+  BuildIDTest.cpp
   COFFObjectFileTest.cpp
   DXContainerTest.cpp
   ELFObjectFileTest.cpp
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
index a943e7ac..b99d656 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanHCFGTest.cpp
@@ -203,7 +203,7 @@ TEST_F(VPlanHCFGTest, testVPInstructionToVPRecipesInner) {
       VPInstruction::BranchOnCond,
       {Plan->getOrAddLiveIn(ConstantInt::getTrue(F->getContext()))}));
   VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
-      Plan, [](PHINode *P) { return nullptr; }, TLI);
+      *Plan, [](PHINode *P) { return nullptr; }, TLI);
 
   VPBlockBase *Entry = Plan->getEntry()->getEntryBasicBlock();
   EXPECT_EQ(0u, Entry->getNumPredecessors());
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp
index eb075e6..b89d378 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanUncountableExitTest.cpp
@@ -48,7 +48,7 @@ TEST_F(VPUncountableExitTest, FindUncountableExitRecipes) {
   BasicBlock *LoopHeader = F->getEntryBlock().getSingleSuccessor();
   auto Plan = buildVPlan(LoopHeader, /*HasUncountableExit=*/true);
   VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
-      Plan, [](PHINode *P) { return nullptr; }, *TLI);
+      *Plan, [](PHINode *P) { return nullptr; }, *TLI);
   VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan);
 
   SmallVector<VPRecipeBase *> Recipes;
@@ -85,7 +85,7 @@ TEST_F(VPUncountableExitTest, NoUncountableExit) {
   BasicBlock *LoopHeader = F->getEntryBlock().getSingleSuccessor();
   auto Plan = buildVPlan(LoopHeader);
   VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
-      Plan, [](PHINode *P) { return nullptr; }, *TLI);
+      *Plan, [](PHINode *P) { return nullptr; }, *TLI);
   VPlanTransforms::runPass(VPlanTransforms::optimize, *Plan);
 
   SmallVector<VPRecipeBase *> Recipes;
diff --git a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
index e873b3e..294f3af 100644
--- a/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenRegisters.cpp
@@ -744,7 +744,7 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
     RSI.insertRegSizeForMode(DefaultMode, RI);
   }
 
-  CopyCost = R->getValueAsInt("CopyCost");
+  int CopyCostParsed = R->getValueAsInt("CopyCost");
   Allocatable = R->getValueAsBit("isAllocatable");
   AltOrderSelect = R->getValueAsString("AltOrderSelect");
   int AllocationPriority = R->getValueAsInt("AllocationPriority");
@@ -757,6 +757,14 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
   const BitsInit *TSF = R->getValueAsBitsInit("TSFlags");
   for (auto [Idx, Bit] : enumerate(TSF->getBits()))
     TSFlags |= uint8_t(cast<BitInit>(Bit)->getValue()) << Idx;
+
+  // Saturate negative costs to the maximum.
+  if (CopyCostParsed < 0)
+    CopyCost = std::numeric_limits<uint8_t>::max();
+  else if (isUInt<8>(CopyCostParsed))
+    CopyCost = CopyCostParsed;
+  else
+    PrintFatalError(R->getLoc(), "'CopyCost' must be an 8-bit value");
 }
 
 // Create an inferred register class that was missing from the .td files.
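With CopyCost now stored as uint8_t (see the header change below), the parsing step above maps any negative .td value to 255, accepts 0..255 unchanged, and rejects everything else with a fatal error. A plain C++ sketch of that narrowing rule for clarity; narrowCopyCost is a local illustration, not a TableGen API:

    #include <cstdint>
    #include <limits>
    #include <optional>

    // Negative means "as expensive as possible" and saturates to 255;
    // values above 255 are user errors (nullopt -> report a fatal error).
    static std::optional<uint8_t> narrowCopyCost(int64_t Parsed) {
      if (Parsed < 0)
        return std::numeric_limits<uint8_t>::max();
      if (Parsed > std::numeric_limits<uint8_t>::max())
        return std::nullopt;
      return static_cast<uint8_t>(Parsed);
    }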
"true" : "false") << " },\n"; } diff --git a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn index b856d1c..764ebb9 100644 --- a/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn +++ b/llvm/utils/gn/secondary/bolt/lib/Rewrite/BUILD.gn @@ -28,6 +28,7 @@ static_library("Rewrite") { "BuildIDRewriter.cpp", "DWARFRewriter.cpp", "ExecutableFileMemoryManager.cpp", + "GNUPropertyRewriter.cpp", "JITLinkLinker.cpp", "LinuxKernelRewriter.cpp", "MachORewriteInstance.cpp", diff --git a/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn index 9fcb05c..54193c8 100644 --- a/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/unittests/Object/BUILD.gn @@ -10,6 +10,7 @@ unittest("ObjectTests") { ] sources = [ "ArchiveTest.cpp", + "BuildIDTest.cpp", "COFFObjectFileTest.cpp", "DXContainerTest.cpp", "ELFObjectFileTest.cpp", |